diff --git a/README.md b/README.md index 895a5603..3fe99efc 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,53 @@ +### Change log [2026-02-11 11:00:15] +1. Item Updated: `verify_schema` (from version: `1.0.0` to `1.0.0`) + +### Change log [2026-02-11 11:00:09] +1. Item Updated: `agent_deployer` (from version: `1.0.0` to `1.0.0`) +2. Item Updated: `histogram_data_drift` (from version: `1.0.0` to `1.0.0`) +3. Item Updated: `openai_proxy_app` (from version: `1.0.0` to `1.0.0`) +4. Item Updated: `oai_spo` (from version: `1.0.0` to `1.0.0`) +5. Item Updated: `vllm_module` (from version: `1.0.0` to `1.0.0`) +6. Item Updated: `count_events` (from version: `1.0.0` to `1.0.0`) +7. Item Updated: `evidently_iris` (from version: `1.0.0` to `1.0.0`) + +### Change log [2026-02-11 11:00:00] +1. Item Updated: `test_classifier` (from version: `1.1.0` to `1.1.0`) +2. Item Updated: `sklearn_classifier` (from version: `1.2.0` to `1.2.0`) +3. Item Updated: `model_server_tester` (from version: `1.1.0` to `1.1.0`) +4. Item Updated: `azureml_serving` (from version: `1.1.0` to `1.1.0`) +5. Item Updated: `describe_dask` (from version: `1.2.0` to `1.2.0`) +6. Item Updated: `batch_inference` (from version: `1.8.0` to `1.8.0`) +7. Item Updated: `v2_model_server` (from version: `1.2.0` to `1.2.0`) +8. Item Updated: `gen_class_data` (from version: `1.3.0` to `1.3.0`) +9. Item Updated: `send_email` (from version: `1.2.0` to `1.2.0`) +10. Item Updated: `tf2_serving` (from version: `1.1.0` to `1.1.0`) +11. Item Updated: `aggregate` (from version: `1.4.0` to `1.4.0`) +12. Item Updated: `open_archive` (from version: `1.2.0` to `1.2.0`) +13. Item Updated: `describe` (from version: `1.4.0` to `1.4.0`) +14. Item Updated: `v2_model_tester` (from version: `1.1.0` to `1.1.0`) +15. Item Updated: `text_to_audio_generator` (from version: `1.3.0` to `1.3.0`) +16. Item Updated: `pii_recognizer` (from version: `0.4.0` to `0.4.0`) +17. Item Updated: `github_utils` (from version: `1.1.0` to `1.1.0`) +18. 
Item Updated: `sklearn_classifier_dask` (from version: `1.1.1` to `1.1.1`) +19. Item Updated: `azureml_utils` (from version: `1.4.0` to `1.4.0`) +20. Item Updated: `question_answering` (from version: `0.5.0` to `0.5.0`) +21. Item Updated: `structured_data_generator` (from version: `1.6.0` to `1.6.0`) +22. Item Updated: `arc_to_parquet` (from version: `1.5.0` to `1.5.0`) +23. Item Updated: `silero_vad` (from version: `1.4.0` to `1.4.0`) +24. Item Updated: `load_dataset` (from version: `1.2.0` to `1.2.0`) +25. Item Updated: `auto_trainer` (from version: `1.8.0` to `1.8.0`) +26. Item Updated: `feature_selection` (from version: `1.6.0` to `1.6.0`) +27. Item Updated: `translate` (from version: `0.3.0` to `0.3.0`) +28. Item Updated: `describe_spark` (from version: `1.1.0` to `1.1.0`) +29. Item Updated: `pyannote_audio` (from version: `1.3.0` to `1.3.0`) +30. Item Updated: `onnx_utils` (from version: `1.4.0` to `1.4.0`) +31. Item Updated: `batch_inference_v2` (from version: `2.6.0` to `2.6.0`) +32. Item Updated: `transcribe` (from version: `1.2.0` to `1.2.0`) +33. Item Updated: `model_server` (from version: `1.2.0` to `1.2.0`) +34. Item Updated: `mlflow_utils` (from version: `1.2.0` to `1.2.0`) +35. Item Updated: `noise_reduction` (from version: `1.1.0` to `1.1.0`) +36. Item Updated: `hugging_face_serving` (from version: `1.1.0` to `1.1.0`) + ### Change log [2026-01-26 14:39:32] 1. 
Item Updated: `verify_schema` (from version: `1.0.0` to `1.0.0`) diff --git a/catalog.json b/catalog.json index d5da2fb7..1146627a 100644 --- a/catalog.json +++ b/catalog.json @@ -1 +1 @@ -{"functions": {"development": {"tf2_serving": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "tf2 image classification server", "doc": "", "example": "tf2_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "tf2-serving", "platformVersion": "3.5.0", "spec": {"filename": "tf2_serving.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": ["requests", "pillow", "tensorflow>=2.1"]}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "tf2 image classification server", "doc": "", "example": "tf2_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "tf2-serving", "platformVersion": "3.5.0", "spec": {"filename": "tf2_serving.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": ["requests", "pillow", "tensorflow>=2.1"]}, "url": "", "version": "1.1.0"}}, "load_dataset": {"latest": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "load a toy dataset from scikit-learn", "doc": "README.md", "example": "load_dataset.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.6.0", "name": "load-dataset", "platformVersion": "3.5.5", "spec": {"filename": "load_dataset.py", "handler": "load_dataset", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, 
"url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "load a toy dataset from scikit-learn", "doc": "README.md", "example": "load_dataset.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.6.0", "name": "load-dataset", "platformVersion": "3.5.5", "spec": {"filename": "load_dataset.py", "handler": "load_dataset", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}}, "model_server_tester": {"latest": {"apiVersion": "v1", "categories": ["monitoring", "model-serving"], "description": "test model servers", "doc": "", "example": "model_server_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "model-server-tester", "platformVersion": "3.5.0", "spec": {"filename": "model_server_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["monitoring", "model-serving"], "description": "test model servers", "doc": "", "example": "model_server_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "model-server-tester", "platformVersion": "3.5.0", "spec": {"filename": "model_server_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}}, "feature_selection": {"latest": {"apiVersion": "v1", "categories": ["data-preparation", "machine-learning"], "description": "Select features through multiple Statistical and Model filters", "doc": "", "example": 
"feature_selection.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc40", "name": "feature-selection", "platformVersion": "3.6.0", "spec": {"filename": "feature_selection.py", "handler": "feature_selection", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.6.0"}, "1.6.0": {"apiVersion": "v1", "categories": ["data-preparation", "machine-learning"], "description": "Select features through multiple Statistical and Model filters", "doc": "", "example": "feature_selection.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc40", "name": "feature-selection", "platformVersion": "3.6.0", "spec": {"filename": "feature_selection.py", "handler": "feature_selection", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.6.0"}}, "aggregate": {"latest": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "Rolling aggregation over Metrics and Lables according to specifications", "doc": "", "example": "aggregate.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "aggregate", "platformVersion": "3.5.4", "spec": {"filename": "aggregate.py", "handler": "aggregate", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0"}, "1.4.0": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "Rolling aggregation over Metrics and Lables according to specifications", "doc": "", "example": "aggregate.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": 
"aggregate", "platformVersion": "3.5.4", "spec": {"filename": "aggregate.py", "handler": "aggregate", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0"}}, "describe": {"latest": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "describe and visualizes dataset stats", "doc": "", "example": "describe.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "describe", "platformVersion": "3.5.3", "spec": {"filename": "describe.py", "handler": "analyze", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0"}, "1.4.0": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "describe and visualizes dataset stats", "doc": "", "example": "describe.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "describe", "platformVersion": "3.5.3", "spec": {"filename": "describe.py", "handler": "analyze", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0"}}, "model_server": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "model-server", "platformVersion": "3.5.0", "spec": {"filename": "model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": []}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", 
"example": "model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "model-server", "platformVersion": "3.5.0", "spec": {"filename": "model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": []}, "url": "", "version": "1.2.0"}}, "describe_spark": {"latest": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "", "doc": "", "example": "describe_spark.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "describe-spark", "platformVersion": "3.5.0", "spec": {"filename": "describe_spark.py", "handler": "describe_spark", "image": "iguazio/shell:3.0_b5565_20201026062233_wsdf", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "", "doc": "", "example": "describe_spark.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "describe-spark", "platformVersion": "3.5.0", "spec": {"filename": "describe_spark.py", "handler": "describe_spark", "image": "iguazio/shell:3.0_b5565_20201026062233_wsdf", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}}, "gen_class_data": {"latest": {"apiVersion": "v1", "categories": ["data-generation"], "description": "Create a binary classification sample dataset and save.", "doc": "", "example": "gen_class_data.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "gen_class_data", "platformVersion": "3.5.3", "spec": {"filename": 
"gen_class_data.py", "handler": "gen_class_data", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.3.0"}, "1.3.0": {"apiVersion": "v1", "categories": ["data-generation"], "description": "Create a binary classification sample dataset and save.", "doc": "", "example": "gen_class_data.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "gen_class_data", "platformVersion": "3.5.3", "spec": {"filename": "gen_class_data.py", "handler": "gen_class_data", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.3.0"}}, "open_archive": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "Open a file/object archive into a target directory", "doc": "", "example": "open_archive.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc50", "name": "open-archive", "platformVersion": "3.5.0", "spec": {"filename": "open_archive.py", "handler": "open_archive", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "Open a file/object archive into a target directory", "doc": "", "example": "open_archive.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc50", "name": "open-archive", "platformVersion": "3.5.0", "spec": {"filename": "open_archive.py", "handler": "open_archive", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}}, "send_email": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "Send Email messages through SMTP server", "doc": "", "example": 
"send_email.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.4.1", "name": "send-email", "platformVersion": "3.5.3", "spec": {"filename": "send_email.py", "handler": "send_email", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "Send Email messages through SMTP server", "doc": "", "example": "send_email.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.4.1", "name": "send-email", "platformVersion": "3.5.3", "spec": {"filename": "send_email.py", "handler": "send_email", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}}, "v2_model_tester": {"latest": {"apiVersion": "v1", "categories": ["model-testing", "machine-learning"], "description": "test v2 model servers", "doc": "", "example": "v2_model_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-tester", "platformVersion": "3.5.0", "spec": {"filename": "v2_model_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["model-testing", "machine-learning"], "description": "test v2 model servers", "doc": "", "example": "v2_model_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-tester", "platformVersion": "3.5.0", "spec": {"filename": "v2_model_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", 
"kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}}, "arc_to_parquet": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "retrieve remote archive, open and save as parquet", "doc": "", "example": "arc_to_parquet.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "arc-to-parquet", "platformVersion": "3.5.4", "spec": {"filename": "arc_to_parquet.py", "handler": "arc_to_parquet", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.5.0"}, "1.5.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "retrieve remote archive, open and save as parquet", "doc": "", "example": "arc_to_parquet.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "arc-to-parquet", "platformVersion": "3.5.4", "spec": {"filename": "arc_to_parquet.py", "handler": "arc_to_parquet", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.5.0"}}, "github_utils": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "add comments to github pull request", "doc": "", "example": "github_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "github-utils", "platformVersion": "3.5.0", "spec": {"filename": "github_utils.py", "handler": "run_summary_comment", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "add comments to github pull request", "doc": "", "example": "github_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": 
{"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "github-utils", "platformVersion": "3.5.0", "spec": {"filename": "github_utils.py", "handler": "run_summary_comment", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}}, "v2_model_server": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "v2_model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-server", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "ClassifierModel"}, "filename": "v2_model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": []}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "v2_model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-server", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "ClassifierModel"}, "filename": "v2_model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": []}, "url": "", "version": "1.2.0"}}, "onnx_utils": {"latest": {"apiVersion": "v1", "categories": ["utilities", "deep-learning"], "description": "ONNX intigration in MLRun, some utils functions for the ONNX framework, optimizing and converting models from different framework to ONNX using MLRun.", "doc": "", "example": "onnx_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": 
"Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.2", "name": "onnx_utils", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "with_mlrun": false}}, "filename": "onnx_utils.py", "handler": "to_onnx", "image": "mlrun/mlrun", "kind": "job", "requirements": ["tqdm~=4.67.1", "tensorflow~=2.19.0", "tf_keras~=2.19.0", "torch~=2.6.0", "torchvision~=0.21.0", "onnx~=1.17.0", "onnxruntime~=1.19.2", "onnxoptimizer~=0.3.13", "onnxmltools~=1.13.0", "tf2onnx~=1.16.1", "plotly~=5.23"]}, "url": "", "version": "1.3.0"}, "1.3.0": {"apiVersion": "v1", "categories": ["utilities", "deep-learning"], "description": "ONNX intigration in MLRun, some utils functions for the ONNX framework, optimizing and converting models from different framework to ONNX using MLRun.", "doc": "", "example": "onnx_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.2", "name": "onnx_utils", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "with_mlrun": false}}, "filename": "onnx_utils.py", "handler": "to_onnx", "image": "mlrun/mlrun", "kind": "job", "requirements": ["tqdm~=4.67.1", "tensorflow~=2.19.0", "tf_keras~=2.19.0", "torch~=2.6.0", "torchvision~=0.21.0", "onnx~=1.17.0", "onnxruntime~=1.19.2", "onnxoptimizer~=0.3.13", "onnxmltools~=1.13.0", "tf2onnx~=1.16.1", "plotly~=5.23"]}, "url": "", "version": "1.3.0"}}, "azureml_utils": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Azure AutoML integration in MLRun, including utils functions for training models on Azure AutoML platfrom.", "doc": "", "example": "azureml_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": 
"1.7.0", "name": "azureml_utils", "platformVersion": "3.5.3", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "commands": ["apt-get update && apt-get install -y --no-install-recommends git", "apt install -y liblttng-ust0"], "with_mlrun": true}}, "filename": "azureml_utils.py", "handler": "train", "image": "python:3.9-bullseye", "kind": "job", "requirements": ["azureml-core==1.54.0.post1", "azureml-train-automl-client==1.54.0.post1", "plotly~=5.23"]}, "url": "", "version": "1.4.0", "test_valid": true}, "1.4.0": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Azure AutoML integration in MLRun, including utils functions for training models on Azure AutoML platfrom.", "doc": "", "example": "azureml_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "azureml_utils", "platformVersion": "3.5.3", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "commands": ["apt-get update && apt-get install -y --no-install-recommends git", "apt install -y liblttng-ust0"], "with_mlrun": true}}, "filename": "azureml_utils.py", "handler": "train", "image": "python:3.9-bullseye", "kind": "job", "requirements": ["azureml-core==1.54.0.post1", "azureml-train-automl-client==1.54.0.post1", "plotly~=5.23"]}, "url": "", "version": "1.4.0", "test_valid": true}}, "auto_trainer": {"latest": {"apiVersion": "v1", "categories": ["machine-learning", "model-training"], "description": "Automatic train, evaluate and predict functions for the ML frameworks - Scikit-Learn, XGBoost and LightGBM.", "doc": "", "example": "auto_trainer.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "auto_trainer", "platformVersion": "3.5.0", "spec": 
{"filename": "auto_trainer.py", "handler": "train", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.8.0"}, "1.8.0": {"apiVersion": "v1", "categories": ["machine-learning", "model-training"], "description": "Automatic train, evaluate and predict functions for the ML frameworks - Scikit-Learn, XGBoost and LightGBM.", "doc": "", "example": "auto_trainer.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "auto_trainer", "platformVersion": "3.5.0", "spec": {"filename": "auto_trainer.py", "handler": "train", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.8.0"}}, "azureml_serving": {"latest": {"apiVersion": "v1", "categories": ["machine-learning", "model-serving"], "description": "AzureML serving function", "doc": "", "example": "azureml_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "azureml_serving", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "mlrun.frameworks.sklearn.PickleModelServer"}, "filename": "azureml_serving.py", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["azureml-automl-runtime~=1.38.1"]}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["machine-learning", "model-serving"], "description": "AzureML serving function", "doc": "", "example": "azureml_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "azureml_serving", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "mlrun.frameworks.sklearn.PickleModelServer"}, "filename": "azureml_serving.py", "image": "mlrun/mlrun", "kind": "serving", 
"requirements": ["azureml-automl-runtime~=1.38.1"]}, "url": "", "version": "1.1.0"}}, "batch_inference": {"latest": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Batch inference (also knows as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "batch_inference", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference.py", "handler": "infer", "image": "mlrun/ml-models", "kind": "job", "requirements": null}, "url": "", "version": "1.8.0"}, "1.8.0": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Batch inference (also knows as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "batch_inference", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference.py", "handler": "infer", "image": "mlrun/ml-models", "kind": "job", "requirements": null}, "url": "", "version": "1.8.0"}}, "hugging_face_serving": {"latest": {"apiVersion": "v1", "categories": ["genai", "model-serving"], "description": "Generic Hugging Face model server.", "doc": "", "example": "hugging_face_serving.ipynb", "generationDate": "2022-09-05:17-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", 
"name": "hugging_face_serving", "platformVersion": "", "spec": {"customFields": {"default_class": "HuggingFaceModelServer"}, "filename": "hugging_face_serving.py", "handler": "handler", "image": "mlrun/ml-models", "kind": "serving", "requirements": ["transformers==4.21.3", "tensorflow==2.9.2"]}, "url": "", "version": "1.1.0", "test_valid": false}, "1.1.0": {"apiVersion": "v1", "categories": ["genai", "model-serving"], "description": "Generic Hugging Face model server.", "doc": "", "example": "hugging_face_serving.ipynb", "generationDate": "2022-09-05:17-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "hugging_face_serving", "platformVersion": "", "spec": {"customFields": {"default_class": "HuggingFaceModelServer"}, "filename": "hugging_face_serving.py", "handler": "handler", "image": "mlrun/ml-models", "kind": "serving", "requirements": ["transformers==4.21.3", "tensorflow==2.9.2"]}, "url": "", "version": "1.1.0", "test_valid": false}}, "transcribe": {"latest": {"apiVersion": "v1", "categories": ["audio", "genai"], "description": "Transcribe audio files into text files", "doc": "", "example": "transcribe.ipynb", "generationDate": "2023-07-13:11-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "transcribe", "platformVersion": "3.5.3", "spec": {"filename": "transcribe.py", "handler": "transcribe", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "tqdm", "torchaudio", "torch", "accelerate"]}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["audio", "genai"], "description": "Transcribe audio files into text files", "doc": "", "example": "transcribe.ipynb", "generationDate": "2023-07-13:11-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": 
"transcribe", "platformVersion": "3.5.3", "spec": {"filename": "transcribe.py", "handler": "transcribe", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "tqdm", "torchaudio", "torch", "accelerate"]}, "url": "", "version": "1.2.0"}}, "question_answering": {"latest": {"apiVersion": "v1", "categories": ["genai"], "description": "GenAI approach of question answering on a given data", "doc": "", "example": "question_answering.ipynb", "generationDate": "2023-08-07:11-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "question_answering", "platformVersion": "3.5.0", "spec": {"filename": "question_answering.py", "handler": "answer_questions", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "torch", "tqdm"]}, "url": "", "version": "0.5.0"}, "0.5.0": {"apiVersion": "v1", "categories": ["genai"], "description": "GenAI approach of question answering on a given data", "doc": "", "example": "question_answering.ipynb", "generationDate": "2023-08-07:11-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "question_answering", "platformVersion": "3.5.0", "spec": {"filename": "question_answering.py", "handler": "answer_questions", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "torch", "tqdm"]}, "url": "", "version": "0.5.0"}}, "pii_recognizer": {"latest": {"apiVersion": "v1", "categories": ["data-preparation", "NLP"], "description": "This function is used to recognize PII in a directory of text files", "doc": "", "example": "pii_recognizer.ipynb", "generationDate": "2023-08-15:10-24", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pii-recognizer", "platformVersion": "3.5.3", "spec": {"filename": "pii_recognizer.py", "handler": "recognize_pii", 
"image": "mlrun/mlrun", "kind": "job", "requirements": ["nltk", "pandas", "presidio-anonymizer", "presidio-analyzer", "torch", "flair@git+https://github.com/flairNLP/flair.git@d4ed67bf663e4066517f00397412510d90043653", "st-annotated-text", "https://huggingface.co/beki/en_spacy_pii_distilbert/resolve/main/en_spacy_pii_distilbert-any-py3-none-any.whl"]}, "url": "", "version": "0.4.0", "test_valid": false}, "0.4.0": {"apiVersion": "v1", "categories": ["data-preparation", "NLP"], "description": "This function is used to recognize PII in a directory of text files", "doc": "", "example": "pii_recognizer.ipynb", "generationDate": "2023-08-15:10-24", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pii-recognizer", "platformVersion": "3.5.3", "spec": {"filename": "pii_recognizer.py", "handler": "recognize_pii", "image": "mlrun/mlrun", "kind": "job", "requirements": ["nltk", "pandas", "presidio-anonymizer", "presidio-analyzer", "torch", "flair@git+https://github.com/flairNLP/flair.git@d4ed67bf663e4066517f00397412510d90043653", "st-annotated-text", "https://huggingface.co/beki/en_spacy_pii_distilbert/resolve/main/en_spacy_pii_distilbert-any-py3-none-any.whl"]}, "url": "", "version": "0.4.0", "test_valid": false}}, "batch_inference_v2": {"latest": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Batch inference (also knows as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference_v2.ipynb", "generationDate": "2023-08-07:12-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0-rc51", "name": "batch_inference_v2", "platformVersion": "3.6.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference_v2.py", 
"handler": "infer", "image": "mlrun/mlrun", "kind": "job", "requirements": null}, "url": "", "version": "2.6.0"}, "2.6.0": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Batch inference (also knows as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference_v2.ipynb", "generationDate": "2023-08-07:12-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0-rc51", "name": "batch_inference_v2", "platformVersion": "3.6.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference_v2.py", "handler": "infer", "image": "mlrun/mlrun", "kind": "job", "requirements": null}, "url": "", "version": "2.6.0"}}, "translate": {"latest": {"apiVersion": "v1", "categories": ["genai", "NLP"], "description": "Translate text files from one language to another", "doc": "", "example": "translate.ipynb", "generationDate": "2023-12-05:17-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0-rc41", "name": "translate", "platformVersion": "3.5.3", "spec": {"filename": "translate.py", "handler": "translate", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "sentencepiece", "torch>=2.6", "tqdm"]}, "url": "", "version": "0.3.0", "test_valid": true}, "0.3.0": {"apiVersion": "v1", "categories": ["genai", "NLP"], "description": "Translate text files from one language to another", "doc": "", "example": "translate.ipynb", "generationDate": "2023-12-05:17-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0-rc41", "name": "translate", "platformVersion": "3.5.3", "spec": {"filename": "translate.py", "handler": "translate", "image": "mlrun/mlrun", 
"kind": "job", "requirements": ["transformers", "sentencepiece", "torch>=2.6", "tqdm"]}, "url": "", "version": "0.3.0", "test_valid": true}}, "structured_data_generator": {"latest": {"apiVersion": "v1", "categories": ["data-generation", "genai"], "description": "GenAI approach of generating structured data according to a given schema", "doc": "", "example": "structured_data_generator.ipynb", "generationDate": "2023-12-14:10-50", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0", "name": "structured_data_generator", "platformVersion": "3.5.5", "spec": {"filename": "structured_data_generator.py", "handler": "generate_data", "image": "mlrun/mlrun", "kind": "job", "requirements": ["langchain", "tqdm"]}, "url": "", "version": "1.6.0"}, "1.6.0": {"apiVersion": "v1", "categories": ["data-generation", "genai"], "description": "GenAI approach of generating structured data according to a given schema", "doc": "", "example": "structured_data_generator.ipynb", "generationDate": "2023-12-14:10-50", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0", "name": "structured_data_generator", "platformVersion": "3.5.5", "spec": {"filename": "structured_data_generator.py", "handler": "generate_data", "image": "mlrun/mlrun", "kind": "job", "requirements": ["langchain", "tqdm"]}, "url": "", "version": "1.6.0"}}, "text_to_audio_generator": {"latest": {"apiVersion": "v1", "categories": ["data-generation", "audio"], "description": "Generate audio file from text using different speakers", "doc": "", "example": "text_to_audio_generator.ipynb", "generationDate": "2023-12-03:15-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.1", "name": "text_to_audio_generator", "platformVersion": "3.5.3", "spec": {"filename": "text_to_audio_generator.py", "handler": 
"generate_multi_speakers_audio", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torchaudio", "pydub"]}, "url": "", "version": "1.3.0", "test_valid": false}, "1.3.0": {"apiVersion": "v1", "categories": ["data-generation", "audio"], "description": "Generate audio file from text using different speakers", "doc": "", "example": "text_to_audio_generator.ipynb", "generationDate": "2023-12-03:15-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.1", "name": "text_to_audio_generator", "platformVersion": "3.5.3", "spec": {"filename": "text_to_audio_generator.py", "handler": "generate_multi_speakers_audio", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torchaudio", "pydub"]}, "url": "", "version": "1.3.0", "test_valid": false}}, "silero_vad": {"latest": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "Silero VAD (Voice Activity Detection) functions.", "doc": "", "example": "silero_vad.ipynb", "generationDate": "2023-12-03:14-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "silero_vad", "platformVersion": "3.5.3", "spec": {"filename": "silero_vad.py", "handler": "detect_voice", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torch", "torchaudio", "tqdm", "onnxruntime"]}, "url": "", "version": "1.4.0"}, "1.4.0": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "Silero VAD (Voice Activity Detection) functions.", "doc": "", "example": "silero_vad.ipynb", "generationDate": "2023-12-03:14-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "silero_vad", "platformVersion": "3.5.3", "spec": {"filename": "silero_vad.py", "handler": "detect_voice", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torch", "torchaudio", "tqdm", 
"onnxruntime"]}, "url": "", "version": "1.4.0"}}, "pyannote_audio": {"latest": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "pyannote's speech diarization of audio files", "doc": "", "example": "pyannote_audio.ipynb", "generationDate": "2023-12-03:14-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pyannote-audio", "platformVersion": "3.5.3", "spec": {"filename": "pyannote_audio.py", "handler": "diarize", "image": "mlrun/mlrun-gpu", "kind": "job", "requirements": ["pyannote.audio", "pyannote.core", "torchaudio", "tqdm"]}, "url": "", "version": "1.3.0"}, "1.3.0": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "pyannote's speech diarization of audio files", "doc": "", "example": "pyannote_audio.ipynb", "generationDate": "2023-12-03:14-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pyannote-audio", "platformVersion": "3.5.3", "spec": {"filename": "pyannote_audio.py", "handler": "diarize", "image": "mlrun/mlrun-gpu", "kind": "job", "requirements": ["pyannote.audio", "pyannote.core", "torchaudio", "tqdm"]}, "url": "", "version": "1.3.0"}}, "noise_reduction": {"latest": {"apiVersion": "v1", "categories": ["data-preparation", "audio"], "description": "Reduce noise from audio files", "doc": "", "example": "noise_reduction.ipynb", "generationDate": "2024-03-04:17-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "mlrunVersion": "1.7.0", "name": "noise-reduction", "platformVersion": "3.5.3", "spec": {"filename": "noise_reduction.py", "handler": "reduce_noise", "image": "mlrun/mlrun", "kind": "job", "requirements": ["librosa", "noisereduce", "deepfilternet", "torchaudio>=2.1.2"]}, "url": "", "version": "1.1.0", "test_valid": false}, "1.1.0": {"apiVersion": "v1", "categories": 
["data-preparation", "audio"], "description": "Reduce noise from audio files", "doc": "", "example": "noise_reduction.ipynb", "generationDate": "2024-03-04:17-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "mlrunVersion": "1.7.0", "name": "noise-reduction", "platformVersion": "3.5.3", "spec": {"filename": "noise_reduction.py", "handler": "reduce_noise", "image": "mlrun/mlrun", "kind": "job", "requirements": ["librosa", "noisereduce", "deepfilternet", "torchaudio>=2.1.2"]}, "url": "", "version": "1.1.0", "test_valid": false}}, "mlflow_utils": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Mlflow model server, and additional utils.", "doc": "", "example": "mlflow_utils.ipynb", "generationDate": "2024-05-23:12-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0", "name": "mlflow_utils", "platformVersion": "", "spec": {"customFields": {"default_class": "MLFlowModelServer"}, "filename": "mlflow_utils.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["mlflow~=3.5"]}, "url": "", "version": "1.2.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["model-serving", "utils"], "description": "Mlflow model server, and additional utils.", "doc": "", "example": "mlflow_utils.ipynb", "generationDate": "2024-05-23:12-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0", "name": "mlflow_utils", "platformVersion": "", "spec": {"customFields": {"default_class": "MLFlowModelServer"}, "filename": "mlflow_utils.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["mlflow~=2.22", "lightgbm", "xgboost"]}, "url": "", "version": "1.1.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Mlflow model server, and additional utils.", "doc": 
"", "example": "mlflow_utils.ipynb", "generationDate": "2024-05-23:12-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0", "name": "mlflow_utils", "platformVersion": "", "spec": {"customFields": {"default_class": "MLFlowModelServer"}, "filename": "mlflow_utils.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["mlflow~=3.5"]}, "url": "", "version": "1.2.0"}}}, "master": {"tf2_serving": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "tf2 image classification server", "doc": "", "example": "tf2_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "tf2-serving", "platformVersion": "3.5.0", "spec": {"filename": "tf2_serving.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": ["requests", "pillow", "tensorflow>=2.1"]}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "tf2 image classification server", "doc": "", "example": "tf2_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "tf2-serving", "platformVersion": "3.5.0", "spec": {"filename": "tf2_serving.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": ["requests", "pillow", "tensorflow>=2.1"]}, "url": "", "version": "1.1.0"}}, "load_dataset": {"latest": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "load a toy dataset from scikit-learn", "doc": "README.md", "example": "load_dataset.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", 
"framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.6.0", "name": "load-dataset", "platformVersion": "3.5.5", "spec": {"filename": "load_dataset.py", "handler": "load_dataset", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "load a toy dataset from scikit-learn", "doc": "README.md", "example": "load_dataset.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.6.0", "name": "load-dataset", "platformVersion": "3.5.5", "spec": {"filename": "load_dataset.py", "handler": "load_dataset", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}}, "model_server_tester": {"latest": {"apiVersion": "v1", "categories": ["monitoring", "model-serving"], "description": "test model servers", "doc": "", "example": "model_server_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "model-server-tester", "platformVersion": "3.5.0", "spec": {"filename": "model_server_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["monitoring", "model-serving"], "description": "test model servers", "doc": "", "example": "model_server_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "model-server-tester", "platformVersion": "3.5.0", "spec": {"filename": "model_server_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", "kind": "job", "requirements": 
[]}, "url": "", "version": "1.1.0"}}, "feature_selection": {"latest": {"apiVersion": "v1", "categories": ["data-preparation", "machine-learning"], "description": "Select features through multiple Statistical and Model filters", "doc": "", "example": "feature_selection.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc40", "name": "feature-selection", "platformVersion": "3.6.0", "spec": {"filename": "feature_selection.py", "handler": "feature_selection", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.6.0"}, "1.6.0": {"apiVersion": "v1", "categories": ["data-preparation", "machine-learning"], "description": "Select features through multiple Statistical and Model filters", "doc": "", "example": "feature_selection.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc40", "name": "feature-selection", "platformVersion": "3.6.0", "spec": {"filename": "feature_selection.py", "handler": "feature_selection", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.6.0"}}, "aggregate": {"latest": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "Rolling aggregation over Metrics and Lables according to specifications", "doc": "", "example": "aggregate.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "aggregate", "platformVersion": "3.5.4", "spec": {"filename": "aggregate.py", "handler": "aggregate", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0"}, "1.4.0": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "Rolling aggregation over Metrics and Lables 
according to specifications", "doc": "", "example": "aggregate.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "aggregate", "platformVersion": "3.5.4", "spec": {"filename": "aggregate.py", "handler": "aggregate", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0"}}, "describe": {"latest": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "describe and visualizes dataset stats", "doc": "", "example": "describe.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "describe", "platformVersion": "3.5.3", "spec": {"filename": "describe.py", "handler": "analyze", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0"}, "1.4.0": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "describe and visualizes dataset stats", "doc": "", "example": "describe.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "describe", "platformVersion": "3.5.3", "spec": {"filename": "describe.py", "handler": "analyze", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0"}}, "model_server": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "model-server", "platformVersion": "3.5.0", "spec": {"filename": "model_server.py", "handler": 
"handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": []}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "model-server", "platformVersion": "3.5.0", "spec": {"filename": "model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": []}, "url": "", "version": "1.2.0"}}, "describe_spark": {"latest": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "", "doc": "", "example": "describe_spark.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "describe-spark", "platformVersion": "3.5.0", "spec": {"filename": "describe_spark.py", "handler": "describe_spark", "image": "iguazio/shell:3.0_b5565_20201026062233_wsdf", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "", "doc": "", "example": "describe_spark.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "describe-spark", "platformVersion": "3.5.0", "spec": {"filename": "describe_spark.py", "handler": "describe_spark", "image": "iguazio/shell:3.0_b5565_20201026062233_wsdf", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}}, "gen_class_data": {"latest": {"apiVersion": "v1", "categories": ["data-generation"], "description": "Create a binary classification sample dataset and save.", "doc": "", "example": 
"gen_class_data.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "gen_class_data", "platformVersion": "3.5.3", "spec": {"filename": "gen_class_data.py", "handler": "gen_class_data", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.3.0"}, "1.3.0": {"apiVersion": "v1", "categories": ["data-generation"], "description": "Create a binary classification sample dataset and save.", "doc": "", "example": "gen_class_data.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "gen_class_data", "platformVersion": "3.5.3", "spec": {"filename": "gen_class_data.py", "handler": "gen_class_data", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.3.0"}}, "open_archive": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "Open a file/object archive into a target directory", "doc": "", "example": "open_archive.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc50", "name": "open-archive", "platformVersion": "3.5.0", "spec": {"filename": "open_archive.py", "handler": "open_archive", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "Open a file/object archive into a target directory", "doc": "", "example": "open_archive.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc50", "name": "open-archive", "platformVersion": "3.5.0", "spec": {"filename": "open_archive.py", "handler": 
"open_archive", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}}, "send_email": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "Send Email messages through SMTP server", "doc": "", "example": "send_email.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.4.1", "name": "send-email", "platformVersion": "3.5.3", "spec": {"filename": "send_email.py", "handler": "send_email", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "Send Email messages through SMTP server", "doc": "", "example": "send_email.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.4.1", "name": "send-email", "platformVersion": "3.5.3", "spec": {"filename": "send_email.py", "handler": "send_email", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}}, "v2_model_tester": {"latest": {"apiVersion": "v1", "categories": ["model-testing", "machine-learning"], "description": "test v2 model servers", "doc": "", "example": "v2_model_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-tester", "platformVersion": "3.5.0", "spec": {"filename": "v2_model_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["model-testing", "machine-learning"], "description": "test v2 model servers", "doc": "", "example": "v2_model_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, 
"icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-tester", "platformVersion": "3.5.0", "spec": {"filename": "v2_model_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}}, "arc_to_parquet": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "retrieve remote archive, open and save as parquet", "doc": "", "example": "arc_to_parquet.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "arc-to-parquet", "platformVersion": "3.5.4", "spec": {"filename": "arc_to_parquet.py", "handler": "arc_to_parquet", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.5.0"}, "1.5.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "retrieve remote archive, open and save as parquet", "doc": "", "example": "arc_to_parquet.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "arc-to-parquet", "platformVersion": "3.5.4", "spec": {"filename": "arc_to_parquet.py", "handler": "arc_to_parquet", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.5.0"}}, "github_utils": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "add comments to github pull request", "doc": "", "example": "github_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "github-utils", "platformVersion": "3.5.0", "spec": {"filename": "github_utils.py", "handler": "run_summary_comment", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, 
"url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "add comments to github pull request", "doc": "", "example": "github_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "github-utils", "platformVersion": "3.5.0", "spec": {"filename": "github_utils.py", "handler": "run_summary_comment", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}}, "v2_model_server": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "v2_model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-server", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "ClassifierModel"}, "filename": "v2_model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": []}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "v2_model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-server", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "ClassifierModel"}, "filename": "v2_model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": []}, "url": "", "version": "1.2.0"}}, "onnx_utils": {"latest": {"apiVersion": "v1", "categories": ["utilities", "deep-learning"], "description": "ONNX intigration 
in MLRun, some utils functions for the ONNX framework, optimizing and converting models from different framework to ONNX using MLRun.", "doc": "", "example": "onnx_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.2", "name": "onnx_utils", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "with_mlrun": false}}, "filename": "onnx_utils.py", "handler": "to_onnx", "image": "mlrun/mlrun", "kind": "job", "requirements": ["tqdm~=4.67.1", "tensorflow~=2.19.0", "tf_keras~=2.19.0", "torch~=2.6.0", "torchvision~=0.21.0", "onnx~=1.17.0", "onnxruntime~=1.19.2", "onnxoptimizer~=0.3.13", "onnxmltools~=1.13.0", "tf2onnx~=1.16.1", "plotly~=5.23"]}, "url": "", "version": "1.3.0"}, "1.3.0": {"apiVersion": "v1", "categories": ["utilities", "deep-learning"], "description": "ONNX intigration in MLRun, some utils functions for the ONNX framework, optimizing and converting models from different framework to ONNX using MLRun.", "doc": "", "example": "onnx_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.2", "name": "onnx_utils", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "with_mlrun": false}}, "filename": "onnx_utils.py", "handler": "to_onnx", "image": "mlrun/mlrun", "kind": "job", "requirements": ["tqdm~=4.67.1", "tensorflow~=2.19.0", "tf_keras~=2.19.0", "torch~=2.6.0", "torchvision~=0.21.0", "onnx~=1.17.0", "onnxruntime~=1.19.2", "onnxoptimizer~=0.3.13", "onnxmltools~=1.13.0", "tf2onnx~=1.16.1", "plotly~=5.23"]}, "url": "", "version": "1.3.0"}}, "azureml_utils": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Azure AutoML integration in MLRun, including utils 
functions for training models on Azure AutoML platfrom.", "doc": "", "example": "azureml_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "azureml_utils", "platformVersion": "3.5.3", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "commands": ["apt-get update && apt-get install -y --no-install-recommends git", "apt install -y liblttng-ust0"], "with_mlrun": true}}, "filename": "azureml_utils.py", "handler": "train", "image": "python:3.9-bullseye", "kind": "job", "requirements": ["azureml-core==1.54.0.post1", "azureml-train-automl-client==1.54.0.post1", "plotly~=5.23"]}, "url": "", "version": "1.4.0", "test_valid": true}, "1.4.0": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Azure AutoML integration in MLRun, including utils functions for training models on Azure AutoML platfrom.", "doc": "", "example": "azureml_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "azureml_utils", "platformVersion": "3.5.3", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "commands": ["apt-get update && apt-get install -y --no-install-recommends git", "apt install -y liblttng-ust0"], "with_mlrun": true}}, "filename": "azureml_utils.py", "handler": "train", "image": "python:3.9-bullseye", "kind": "job", "requirements": ["azureml-core==1.54.0.post1", "azureml-train-automl-client==1.54.0.post1", "plotly~=5.23"]}, "url": "", "version": "1.4.0", "test_valid": true}}, "auto_trainer": {"latest": {"apiVersion": "v1", "categories": ["machine-learning", "model-training"], "description": "Automatic train, evaluate and predict functions for the ML frameworks - Scikit-Learn, XGBoost and LightGBM.", "doc": "", 
"example": "auto_trainer.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "auto_trainer", "platformVersion": "3.5.0", "spec": {"filename": "auto_trainer.py", "handler": "train", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.8.0"}, "1.8.0": {"apiVersion": "v1", "categories": ["machine-learning", "model-training"], "description": "Automatic train, evaluate and predict functions for the ML frameworks - Scikit-Learn, XGBoost and LightGBM.", "doc": "", "example": "auto_trainer.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "auto_trainer", "platformVersion": "3.5.0", "spec": {"filename": "auto_trainer.py", "handler": "train", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.8.0"}}, "azureml_serving": {"latest": {"apiVersion": "v1", "categories": ["machine-learning", "model-serving"], "description": "AzureML serving function", "doc": "", "example": "azureml_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "azureml_serving", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "mlrun.frameworks.sklearn.PickleModelServer"}, "filename": "azureml_serving.py", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["azureml-automl-runtime~=1.38.1"]}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["machine-learning", "model-serving"], "description": "AzureML serving function", "doc": "", "example": "azureml_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], 
"marketplaceType": "", "mlrunVersion": "1.1.0", "name": "azureml_serving", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "mlrun.frameworks.sklearn.PickleModelServer"}, "filename": "azureml_serving.py", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["azureml-automl-runtime~=1.38.1"]}, "url": "", "version": "1.1.0"}}, "batch_inference": {"latest": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Batch inference (also knows as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "batch_inference", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference.py", "handler": "infer", "image": "mlrun/ml-models", "kind": "job", "requirements": null}, "url": "", "version": "1.8.0"}, "1.8.0": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Batch inference (also knows as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "batch_inference", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference.py", "handler": "infer", "image": "mlrun/ml-models", "kind": "job", "requirements": null}, "url": "", "version": "1.8.0"}}, "hugging_face_serving": {"latest": {"apiVersion": "v1", "categories": ["genai", "model-serving"], 
"description": "Generic Hugging Face model server.", "doc": "", "example": "hugging_face_serving.ipynb", "generationDate": "2022-09-05:17-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "hugging_face_serving", "platformVersion": "", "spec": {"customFields": {"default_class": "HuggingFaceModelServer"}, "filename": "hugging_face_serving.py", "handler": "handler", "image": "mlrun/ml-models", "kind": "serving", "requirements": ["transformers==4.21.3", "tensorflow==2.9.2"]}, "url": "", "version": "1.1.0", "test_valid": false}, "1.1.0": {"apiVersion": "v1", "categories": ["genai", "model-serving"], "description": "Generic Hugging Face model server.", "doc": "", "example": "hugging_face_serving.ipynb", "generationDate": "2022-09-05:17-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "hugging_face_serving", "platformVersion": "", "spec": {"customFields": {"default_class": "HuggingFaceModelServer"}, "filename": "hugging_face_serving.py", "handler": "handler", "image": "mlrun/ml-models", "kind": "serving", "requirements": ["transformers==4.21.3", "tensorflow==2.9.2"]}, "url": "", "version": "1.1.0", "test_valid": false}}, "question_answering": {"latest": {"apiVersion": "v1", "categories": ["genai"], "description": "GenAI approach of question answering on a given data", "doc": "", "example": "question_answering.ipynb", "generationDate": "2023-08-07:11-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "question_answering", "platformVersion": "3.5.0", "spec": {"filename": "question_answering.py", "handler": "answer_questions", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "torch", "tqdm"]}, "url": "", "version": "0.5.0"}, "0.5.0": {"apiVersion": "v1", "categories": ["genai"], 
"description": "GenAI approach of question answering on a given data", "doc": "", "example": "question_answering.ipynb", "generationDate": "2023-08-07:11-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "question_answering", "platformVersion": "3.5.0", "spec": {"filename": "question_answering.py", "handler": "answer_questions", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "torch", "tqdm"]}, "url": "", "version": "0.5.0"}}, "transcribe": {"latest": {"apiVersion": "v1", "categories": ["audio", "genai"], "description": "Transcribe audio files into text files", "doc": "", "example": "transcribe.ipynb", "generationDate": "2023-07-13:11-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "transcribe", "platformVersion": "3.5.3", "spec": {"filename": "transcribe.py", "handler": "transcribe", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "tqdm", "torchaudio", "torch", "accelerate"]}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["audio", "genai"], "description": "Transcribe audio files into text files", "doc": "", "example": "transcribe.ipynb", "generationDate": "2023-07-13:11-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "transcribe", "platformVersion": "3.5.3", "spec": {"filename": "transcribe.py", "handler": "transcribe", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "tqdm", "torchaudio", "torch", "accelerate"]}, "url": "", "version": "1.2.0"}}, "pii_recognizer": {"latest": {"apiVersion": "v1", "categories": ["data-preparation", "NLP"], "description": "This function is used to recognize PII in a directory of text files", "doc": "", "example": "pii_recognizer.ipynb", "generationDate": 
"2023-08-15:10-24", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pii-recognizer", "platformVersion": "3.5.3", "spec": {"filename": "pii_recognizer.py", "handler": "recognize_pii", "image": "mlrun/mlrun", "kind": "job", "requirements": ["nltk", "pandas", "presidio-anonymizer", "presidio-analyzer", "torch", "flair@git+https://github.com/flairNLP/flair.git@d4ed67bf663e4066517f00397412510d90043653", "st-annotated-text", "https://huggingface.co/beki/en_spacy_pii_distilbert/resolve/main/en_spacy_pii_distilbert-any-py3-none-any.whl"]}, "url": "", "version": "0.4.0", "test_valid": false}, "0.4.0": {"apiVersion": "v1", "categories": ["data-preparation", "NLP"], "description": "This function is used to recognize PII in a directory of text files", "doc": "", "example": "pii_recognizer.ipynb", "generationDate": "2023-08-15:10-24", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pii-recognizer", "platformVersion": "3.5.3", "spec": {"filename": "pii_recognizer.py", "handler": "recognize_pii", "image": "mlrun/mlrun", "kind": "job", "requirements": ["nltk", "pandas", "presidio-anonymizer", "presidio-analyzer", "torch", "flair@git+https://github.com/flairNLP/flair.git@d4ed67bf663e4066517f00397412510d90043653", "st-annotated-text", "https://huggingface.co/beki/en_spacy_pii_distilbert/resolve/main/en_spacy_pii_distilbert-any-py3-none-any.whl"]}, "url": "", "version": "0.4.0", "test_valid": false}}, "batch_inference_v2": {"latest": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Batch inference (also knows as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference_v2.ipynb", "generationDate": "2023-08-07:12-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, 
"maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0-rc51", "name": "batch_inference_v2", "platformVersion": "3.6.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference_v2.py", "handler": "infer", "image": "mlrun/mlrun", "kind": "job", "requirements": null}, "url": "", "version": "2.6.0"}, "2.6.0": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Batch inference (also knows as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference_v2.ipynb", "generationDate": "2023-08-07:12-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0-rc51", "name": "batch_inference_v2", "platformVersion": "3.6.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference_v2.py", "handler": "infer", "image": "mlrun/mlrun", "kind": "job", "requirements": null}, "url": "", "version": "2.6.0"}}, "translate": {"latest": {"apiVersion": "v1", "categories": ["genai", "NLP"], "description": "Translate text files from one language to another", "doc": "", "example": "translate.ipynb", "generationDate": "2023-12-05:17-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0-rc41", "name": "translate", "platformVersion": "3.5.3", "spec": {"filename": "translate.py", "handler": "translate", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "sentencepiece", "torch>=2.6", "tqdm"]}, "url": "", "version": "0.3.0", "test_valid": true}, "0.3.0": {"apiVersion": "v1", "categories": ["genai", "NLP"], "description": "Translate text files from one language to another", "doc": "", "example": "translate.ipynb", "generationDate": 
"2023-12-05:17-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0-rc41", "name": "translate", "platformVersion": "3.5.3", "spec": {"filename": "translate.py", "handler": "translate", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "sentencepiece", "torch>=2.6", "tqdm"]}, "url": "", "version": "0.3.0", "test_valid": true}}, "structured_data_generator": {"latest": {"apiVersion": "v1", "categories": ["data-generation", "genai"], "description": "GenAI approach of generating structured data according to a given schema", "doc": "", "example": "structured_data_generator.ipynb", "generationDate": "2023-12-14:10-50", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0", "name": "structured_data_generator", "platformVersion": "3.5.5", "spec": {"filename": "structured_data_generator.py", "handler": "generate_data", "image": "mlrun/mlrun", "kind": "job", "requirements": ["langchain", "tqdm"]}, "url": "", "version": "1.6.0"}, "1.6.0": {"apiVersion": "v1", "categories": ["data-generation", "genai"], "description": "GenAI approach of generating structured data according to a given schema", "doc": "", "example": "structured_data_generator.ipynb", "generationDate": "2023-12-14:10-50", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0", "name": "structured_data_generator", "platformVersion": "3.5.5", "spec": {"filename": "structured_data_generator.py", "handler": "generate_data", "image": "mlrun/mlrun", "kind": "job", "requirements": ["langchain", "tqdm"]}, "url": "", "version": "1.6.0"}}, "text_to_audio_generator": {"latest": {"apiVersion": "v1", "categories": ["data-generation", "audio"], "description": "Generate audio file from text using different speakers", "doc": "", "example": "text_to_audio_generator.ipynb", 
"generationDate": "2023-12-03:15-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.1", "name": "text_to_audio_generator", "platformVersion": "3.5.3", "spec": {"filename": "text_to_audio_generator.py", "handler": "generate_multi_speakers_audio", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torchaudio", "pydub"]}, "url": "", "version": "1.3.0", "test_valid": false}, "1.3.0": {"apiVersion": "v1", "categories": ["data-generation", "audio"], "description": "Generate audio file from text using different speakers", "doc": "", "example": "text_to_audio_generator.ipynb", "generationDate": "2023-12-03:15-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.1", "name": "text_to_audio_generator", "platformVersion": "3.5.3", "spec": {"filename": "text_to_audio_generator.py", "handler": "generate_multi_speakers_audio", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torchaudio", "pydub"]}, "url": "", "version": "1.3.0", "test_valid": false}}, "silero_vad": {"latest": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "Silero VAD (Voice Activity Detection) functions.", "doc": "", "example": "silero_vad.ipynb", "generationDate": "2023-12-03:14-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "silero_vad", "platformVersion": "3.5.3", "spec": {"filename": "silero_vad.py", "handler": "detect_voice", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torch", "torchaudio", "tqdm", "onnxruntime"]}, "url": "", "version": "1.4.0"}, "1.4.0": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "Silero VAD (Voice Activity Detection) functions.", "doc": "", "example": "silero_vad.ipynb", "generationDate": "2023-12-03:14-30", "hidden": false, "icon": "", "labels": 
{"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "silero_vad", "platformVersion": "3.5.3", "spec": {"filename": "silero_vad.py", "handler": "detect_voice", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torch", "torchaudio", "tqdm", "onnxruntime"]}, "url": "", "version": "1.4.0"}}, "pyannote_audio": {"latest": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "pyannote's speech diarization of audio files", "doc": "", "example": "pyannote_audio.ipynb", "generationDate": "2023-12-03:14-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pyannote-audio", "platformVersion": "3.5.3", "spec": {"filename": "pyannote_audio.py", "handler": "diarize", "image": "mlrun/mlrun-gpu", "kind": "job", "requirements": ["pyannote.audio", "pyannote.core", "torchaudio", "tqdm"]}, "url": "", "version": "1.3.0"}, "1.3.0": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "pyannote's speech diarization of audio files", "doc": "", "example": "pyannote_audio.ipynb", "generationDate": "2023-12-03:14-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pyannote-audio", "platformVersion": "3.5.3", "spec": {"filename": "pyannote_audio.py", "handler": "diarize", "image": "mlrun/mlrun-gpu", "kind": "job", "requirements": ["pyannote.audio", "pyannote.core", "torchaudio", "tqdm"]}, "url": "", "version": "1.3.0"}}, "mlflow_utils": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Mlflow model server, and additional utils.", "doc": "", "example": "mlflow_utils.ipynb", "generationDate": "2024-05-23:12-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0", "name": "mlflow_utils", 
"platformVersion": "", "spec": {"customFields": {"default_class": "MLFlowModelServer"}, "filename": "mlflow_utils.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["mlflow~=3.5"]}, "url": "", "version": "1.2.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Mlflow model server, and additional utils.", "doc": "", "example": "mlflow_utils.ipynb", "generationDate": "2024-05-23:12-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0", "name": "mlflow_utils", "platformVersion": "", "spec": {"customFields": {"default_class": "MLFlowModelServer"}, "filename": "mlflow_utils.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["mlflow~=2.22", "lightgbm", "xgboost"]}, "url": "", "version": "1.1.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Mlflow model server, and additional utils.", "doc": "", "example": "mlflow_utils.ipynb", "generationDate": "2024-05-23:12-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0", "name": "mlflow_utils", "platformVersion": "", "spec": {"customFields": {"default_class": "MLFlowModelServer"}, "filename": "mlflow_utils.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["mlflow~=3.5"]}, "url": "", "version": "1.2.0"}}, "noise_reduction": {"latest": {"apiVersion": "v1", "categories": ["data-preparation", "audio"], "description": "Reduce noise from audio files", "doc": "", "example": "noise_reduction.ipynb", "generationDate": "2024-03-04:17-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "mlrunVersion": "1.7.0", "name": "noise-reduction", "platformVersion": "3.5.3", "spec": {"filename": "noise_reduction.py", "handler": "reduce_noise", "image": 
"mlrun/mlrun", "kind": "job", "requirements": ["librosa", "noisereduce", "deepfilternet", "torchaudio>=2.1.2"]}, "url": "", "version": "1.1.0", "test_valid": false}, "1.1.0": {"apiVersion": "v1", "categories": ["data-preparation", "audio"], "description": "Reduce noise from audio files", "doc": "", "example": "noise_reduction.ipynb", "generationDate": "2024-03-04:17-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "mlrunVersion": "1.7.0", "name": "noise-reduction", "platformVersion": "3.5.3", "spec": {"filename": "noise_reduction.py", "handler": "reduce_noise", "image": "mlrun/mlrun", "kind": "job", "requirements": ["librosa", "noisereduce", "deepfilternet", "torchaudio>=2.1.2"]}, "url": "", "version": "1.1.0", "test_valid": false}}}}, "modules": {"development": {"count_events": {"latest": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Count events in each time window", "example": "count_events.ipynb", "generationDate": "2025-09-16:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "count_events", "spec": {"filename": "count_events.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": null}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Count events in each time window", "example": "count_events.ipynb", "generationDate": "2025-09-16:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "count_events", "spec": {"filename": "count_events.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": null}, "version": "1.0.0"}}, "histogram_data_drift": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "structured-ML"], "description": "Model-monitoring application for detecting and visualizing data drift", "example": "histogram_data_drift.ipynb", "generationDate": "2025-11-06:12-25", "hidden": false, "labels": 
{"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "histogram_data_drift", "spec": {"filename": "histogram_data_drift.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": ["plotly~=5.23", "pandas"]}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["model-serving", "structured-ML"], "description": "Model-monitoring application for detecting and visualizing data drift", "example": "histogram_data_drift.ipynb", "generationDate": "2025-11-06:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "histogram_data_drift", "spec": {"filename": "histogram_data_drift.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": ["plotly~=5.23", "pandas"]}, "version": "1.0.0"}}, "openai_proxy_app": {"latest": {"apiVersion": "v1", "categories": ["genai"], "description": "OpenAI application runtime based on fastapi", "example": "openai_proxy_app.ipynb", "generationDate": "2025-11-11:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "openai_proxy_app", "spec": {"filename": "openai_proxy_app.py", "image": "mlrun/mlrun", "requirements": ["fastapi==0.124.0", "uvicorn[standard]==0.38.0", "gunicorn==23.0.0", "requests==2.32.5"], "kind": "generic"}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["genai"], "description": "OpenAI application runtime based on fastapi", "example": "openai_proxy_app.ipynb", "generationDate": "2025-11-11:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "openai_proxy_app", "spec": {"filename": "openai_proxy_app.py", "image": "mlrun/mlrun", "requirements": ["fastapi==0.124.0", "uvicorn[standard]==0.38.0", "gunicorn==23.0.0", "requests==2.32.5"], "kind": "generic"}, "version": "1.0.0"}}, "evidently_iris": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "structured-ML"], "description": "Demonstrates Evidently integration in MLRun 
for data quality and drift monitoring using the Iris dataset", "example": "evidently_iris.ipynb", "generationDate": "2025-11-09:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "evidently_iris", "spec": {"filename": "evidently_iris.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": ["scikit-learn~=1.5.2", "evidently~=0.7.5", "pandas", "sniffio~=1.3.0"]}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["model-serving", "structured-ML"], "description": "Demonstrates Evidently integration in MLRun for data quality and drift monitoring using the Iris dataset", "example": "evidently_iris.ipynb", "generationDate": "2025-11-09:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "evidently_iris", "spec": {"filename": "evidently_iris.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": ["scikit-learn~=1.5.2", "evidently~=0.7.5", "pandas", "sniffio~=1.3.0"]}, "version": "1.0.0"}}, "agent_deployer": {"latest": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Helper for serving function deploy of an AI agents using MLRun", "example": "agent_deployer.ipynb", "generationDate": "2025-12-07", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "agent_deployer", "spec": {"filename": "agent_deployer.py", "image": "mlrun/mlrun", "kind": "generic", "requirements": null}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Helper for serving function deploy of an AI agents using MLRun", "example": "agent_deployer.ipynb", "generationDate": "2025-12-07", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "agent_deployer", "spec": {"filename": "agent_deployer.py", "image": "mlrun/mlrun", "kind": "generic", "requirements": null}, "version": "1.0.0"}}, "vllm_module": {"latest": {"apiVersion": 
"v1", "categories": ["genai"], "description": "Deploys a vLLM OpenAI-compatible LLM server as an MLRun application runtime, with configurable GPU usage, node selection, tensor parallelism, and runtime flags.", "example": "vllm_module.ipynb", "generationDate": "2025-12-17:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "vllm_module", "spec": {"filename": "vllm_module.py", "image": "mlrun/mlrun", "kind": "generic"}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["genai"], "description": "Deploys a vLLM OpenAI-compatible LLM server as an MLRun application runtime, with configurable GPU usage, node selection, tensor parallelism, and runtime flags.", "example": "vllm_module.ipynb", "generationDate": "2025-12-17:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "vllm_module", "spec": {"filename": "vllm_module.py", "image": "mlrun/mlrun", "kind": "generic"}, "version": "1.0.0"}}, "oai_spo": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "structured-ML"], "description": "OAI SPO use case for industrial optimization", "example": "oai_spo.ipynb", "generationDate": "2026-01-26:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.9.2", "name": "oai_spo", "spec": {"filename": "oai_spo.py", "image": "mlrun/mlrun", "kind": "generic", "requirements": null}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["model-serving", "structured-ML"], "description": "OAI SPO use case for industrial optimization", "example": "oai_spo.ipynb", "generationDate": "2026-01-26:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.9.2", "name": "oai_spo", "spec": {"filename": "oai_spo.py", "image": "mlrun/mlrun", "kind": "generic", "requirements": null}, "version": "1.0.0"}}}, "master": {"count_events": {"latest": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Count events in each time window", 
"example": "count_events.ipynb", "generationDate": "2025-09-16:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "count_events", "spec": {"filename": "count_events.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": null}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Count events in each time window", "example": "count_events.ipynb", "generationDate": "2025-09-16:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "count_events", "spec": {"filename": "count_events.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": null}, "version": "1.0.0"}}, "histogram_data_drift": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "structured-ML"], "description": "Model-monitoring application for detecting and visualizing data drift", "example": "histogram_data_drift.ipynb", "generationDate": "2025-11-06:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "histogram_data_drift", "spec": {"filename": "histogram_data_drift.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": ["plotly~=5.23", "pandas"]}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["model-serving", "structured-ML"], "description": "Model-monitoring application for detecting and visualizing data drift", "example": "histogram_data_drift.ipynb", "generationDate": "2025-11-06:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "histogram_data_drift", "spec": {"filename": "histogram_data_drift.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": ["plotly~=5.23", "pandas"]}, "version": "1.0.0"}}, "openai_proxy_app": {"latest": {"apiVersion": "v1", "categories": ["genai"], "description": "OpenAI application runtime based on fastapi", "example": 
"openai_proxy_app.ipynb", "generationDate": "2025-11-11:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "openai_proxy_app", "spec": {"filename": "openai_proxy_app.py", "image": "mlrun/mlrun", "requirements": ["fastapi==0.124.0", "uvicorn[standard]==0.38.0", "gunicorn==23.0.0", "requests==2.32.5"], "kind": "generic"}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["genai"], "description": "OpenAI application runtime based on fastapi", "example": "openai_proxy_app.ipynb", "generationDate": "2025-11-11:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "openai_proxy_app", "spec": {"filename": "openai_proxy_app.py", "image": "mlrun/mlrun", "requirements": ["fastapi==0.124.0", "uvicorn[standard]==0.38.0", "gunicorn==23.0.0", "requests==2.32.5"], "kind": "generic"}, "version": "1.0.0"}}, "evidently_iris": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "structured-ML"], "description": "Demonstrates Evidently integration in MLRun for data quality and drift monitoring using the Iris dataset", "example": "evidently_iris.ipynb", "generationDate": "2025-11-09:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "evidently_iris", "spec": {"filename": "evidently_iris.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": ["scikit-learn~=1.5.2", "evidently~=0.7.5", "pandas", "sniffio~=1.3.0"]}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["model-serving", "structured-ML"], "description": "Demonstrates Evidently integration in MLRun for data quality and drift monitoring using the Iris dataset", "example": "evidently_iris.ipynb", "generationDate": "2025-11-09:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "evidently_iris", "spec": {"filename": "evidently_iris.py", "image": "mlrun/mlrun", "kind": "monitoring_application", 
"requirements": ["scikit-learn~=1.5.2", "evidently~=0.7.5", "pandas", "sniffio~=1.3.0"]}, "version": "1.0.0"}}, "agent_deployer": {"latest": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Helper for serving function deploy of an AI agents using MLRun", "example": "agent_deployer.ipynb", "generationDate": "2025-12-07", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "agent_deployer", "spec": {"filename": "agent_deployer.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": null}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Helper for serving function deploy of an AI agents using MLRun", "example": "agent_deployer.ipynb", "generationDate": "2025-12-07", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "agent_deployer", "spec": {"filename": "agent_deployer.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": null}, "version": "1.0.0"}}, "vllm_module": {"latest": {"apiVersion": "v1", "categories": ["genai"], "description": "Deploys a vLLM OpenAI-compatible LLM server as an MLRun application runtime, with configurable GPU usage, node selection, tensor parallelism, and runtime flags.", "example": "vllm_module.ipynb", "generationDate": "2025-12-17:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "vllm_module", "spec": {"filename": "vllm_module.py", "image": "mlrun/mlrun", "kind": "generic"}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["genai"], "description": "Deploys a vLLM OpenAI-compatible LLM server as an MLRun application runtime, with configurable GPU usage, node selection, tensor parallelism, and runtime flags.", "example": "vllm_module.ipynb", "generationDate": "2025-12-17:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "vllm_module", "spec": {"filename": 
"vllm_module.py", "image": "mlrun/mlrun", "kind": "generic"}, "version": "1.0.0"}}}}, "steps": {"development": {"verify_schema": {"latest": {"apiVersion": "v1", "categories": ["data-preparation", "model-serving", "utilities"], "description": "Verifies the event is aligned with the provided schema", "example": "verify_schema.ipynb", "generationDate": "2025-12-29:11-59", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "verify_schema", "className": "VerifySchema", "defaultHandler": null, "spec": {"filename": "verify_schema.py", "image": "mlrun/mlrun", "requirements": null, "kind": "generic"}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["data-preparation", "model-serving", "utilities"], "description": "Verifies the event is aligned with the provided schema", "example": "verify_schema.ipynb", "generationDate": "2025-12-29:11-59", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "verify_schema", "className": "VerifySchema", "defaultHandler": null, "spec": {"filename": "verify_schema.py", "image": "mlrun/mlrun", "requirements": null, "kind": "generic"}, "version": "1.0.0"}}}, "master": {"verify_schema": {"latest": {"apiVersion": "v1", "categories": ["data-preparation", "model-serving", "utilities"], "description": "Verifies the event is aligned with the provided schema", "example": "verify_schema.ipynb", "generationDate": "2025-12-29:11-59", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "verify_schema", "className": "VerifySchema", "defaultHandler": null, "spec": {"filename": "verify_schema.py", "image": "mlrun/mlrun", "requirements": null, "kind": "generic"}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["data-preparation", "model-serving", "utilities"], "description": "Verifies the event is aligned with the provided schema", "example": "verify_schema.ipynb", "generationDate": "2025-12-29:11-59", "hidden": false, "labels": 
{"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "verify_schema", "className": "VerifySchema", "defaultHandler": null, "spec": {"filename": "verify_schema.py", "image": "mlrun/mlrun", "requirements": null, "kind": "generic"}, "version": "1.0.0"}}}}} \ No newline at end of file +{"functions": {"development": {"tf2_serving": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "tf2 image classification server", "doc": "", "example": "tf2_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "tf2-serving", "platformVersion": "3.5.0", "spec": {"filename": "tf2_serving.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": ["requests", "pillow", "tensorflow>=2.1"]}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "tf2 image classification server", "doc": "", "example": "tf2_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "tf2-serving", "platformVersion": "3.5.0", "spec": {"filename": "tf2_serving.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": ["requests", "pillow", "tensorflow>=2.1"]}, "url": "", "version": "1.1.0"}}, "load_dataset": {"latest": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "load a toy dataset from scikit-learn", "doc": "README.md", "example": "load_dataset.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.6.0", "name": "load-dataset", "platformVersion": "3.5.5", "spec": {"filename": 
"load_dataset.py", "handler": "load_dataset", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "load a toy dataset from scikit-learn", "doc": "README.md", "example": "load_dataset.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.6.0", "name": "load-dataset", "platformVersion": "3.5.5", "spec": {"filename": "load_dataset.py", "handler": "load_dataset", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}}, "model_server_tester": {"latest": {"apiVersion": "v1", "categories": ["monitoring", "model-serving"], "description": "test model servers", "doc": "", "example": "model_server_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "model-server-tester", "platformVersion": "3.5.0", "spec": {"filename": "model_server_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["monitoring", "model-serving"], "description": "test model servers", "doc": "", "example": "model_server_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "model-server-tester", "platformVersion": "3.5.0", "spec": {"filename": "model_server_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}}, "feature_selection": {"latest": {"apiVersion": "v1", "categories": ["data-preparation", "machine-learning"], "description": 
"Select features through multiple Statistical and Model filters", "doc": "", "example": "feature_selection.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc40", "name": "feature-selection", "platformVersion": "3.6.0", "spec": {"filename": "feature_selection.py", "handler": "feature_selection", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.6.0"}, "1.6.0": {"apiVersion": "v1", "categories": ["data-preparation", "machine-learning"], "description": "Select features through multiple Statistical and Model filters", "doc": "", "example": "feature_selection.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc40", "name": "feature-selection", "platformVersion": "3.6.0", "spec": {"filename": "feature_selection.py", "handler": "feature_selection", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.6.0"}}, "aggregate": {"latest": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "Rolling aggregation over Metrics and Lables according to specifications", "doc": "", "example": "aggregate.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "aggregate", "platformVersion": "3.5.4", "spec": {"filename": "aggregate.py", "handler": "aggregate", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0"}, "1.4.0": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "Rolling aggregation over Metrics and Lables according to specifications", "doc": "", "example": "aggregate.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": 
"Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "aggregate", "platformVersion": "3.5.4", "spec": {"filename": "aggregate.py", "handler": "aggregate", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0"}}, "describe": {"latest": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "describe and visualizes dataset stats", "doc": "", "example": "describe.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "describe", "platformVersion": "3.5.3", "spec": {"filename": "describe.py", "handler": "analyze", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0"}, "1.4.0": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "describe and visualizes dataset stats", "doc": "", "example": "describe.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "describe", "platformVersion": "3.5.3", "spec": {"filename": "describe.py", "handler": "analyze", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0"}}, "model_server": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "model-server", "platformVersion": "3.5.0", "spec": {"filename": "model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": []}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": 
["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "model-server", "platformVersion": "3.5.0", "spec": {"filename": "model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": []}, "url": "", "version": "1.2.0"}}, "describe_spark": {"latest": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "", "doc": "", "example": "describe_spark.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "describe-spark", "platformVersion": "3.5.0", "spec": {"filename": "describe_spark.py", "handler": "describe_spark", "image": "iguazio/shell:3.0_b5565_20201026062233_wsdf", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "", "doc": "", "example": "describe_spark.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "describe-spark", "platformVersion": "3.5.0", "spec": {"filename": "describe_spark.py", "handler": "describe_spark", "image": "iguazio/shell:3.0_b5565_20201026062233_wsdf", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}}, "gen_class_data": {"latest": {"apiVersion": "v1", "categories": ["data-generation"], "description": "Create a binary classification sample dataset and save.", "doc": "", "example": "gen_class_data.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", 
"mlrunVersion": "1.7.0", "name": "gen_class_data", "platformVersion": "3.5.3", "spec": {"filename": "gen_class_data.py", "handler": "gen_class_data", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.3.0"}, "1.3.0": {"apiVersion": "v1", "categories": ["data-generation"], "description": "Create a binary classification sample dataset and save.", "doc": "", "example": "gen_class_data.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "gen_class_data", "platformVersion": "3.5.3", "spec": {"filename": "gen_class_data.py", "handler": "gen_class_data", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.3.0"}}, "open_archive": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "Open a file/object archive into a target directory", "doc": "", "example": "open_archive.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc50", "name": "open-archive", "platformVersion": "3.5.0", "spec": {"filename": "open_archive.py", "handler": "open_archive", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "Open a file/object archive into a target directory", "doc": "", "example": "open_archive.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc50", "name": "open-archive", "platformVersion": "3.5.0", "spec": {"filename": "open_archive.py", "handler": "open_archive", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}}, "send_email": {"latest": {"apiVersion": "v1", 
"categories": ["utilities"], "description": "Send Email messages through SMTP server", "doc": "", "example": "send_email.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.4.1", "name": "send-email", "platformVersion": "3.5.3", "spec": {"filename": "send_email.py", "handler": "send_email", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "Send Email messages through SMTP server", "doc": "", "example": "send_email.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.4.1", "name": "send-email", "platformVersion": "3.5.3", "spec": {"filename": "send_email.py", "handler": "send_email", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}}, "v2_model_tester": {"latest": {"apiVersion": "v1", "categories": ["model-testing", "machine-learning"], "description": "test v2 model servers", "doc": "", "example": "v2_model_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-tester", "platformVersion": "3.5.0", "spec": {"filename": "v2_model_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["model-testing", "machine-learning"], "description": "test v2 model servers", "doc": "", "example": "v2_model_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-tester", "platformVersion": 
"3.5.0", "spec": {"filename": "v2_model_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}}, "arc_to_parquet": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "retrieve remote archive, open and save as parquet", "doc": "", "example": "arc_to_parquet.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "arc-to-parquet", "platformVersion": "3.5.4", "spec": {"filename": "arc_to_parquet.py", "handler": "arc_to_parquet", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.5.0"}, "1.5.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "retrieve remote archive, open and save as parquet", "doc": "", "example": "arc_to_parquet.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "arc-to-parquet", "platformVersion": "3.5.4", "spec": {"filename": "arc_to_parquet.py", "handler": "arc_to_parquet", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.5.0"}}, "github_utils": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "add comments to github pull request", "doc": "", "example": "github_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "github-utils", "platformVersion": "3.5.0", "spec": {"filename": "github_utils.py", "handler": "run_summary_comment", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "add comments to github pull request", "doc": "", 
"example": "github_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "github-utils", "platformVersion": "3.5.0", "spec": {"filename": "github_utils.py", "handler": "run_summary_comment", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}}, "v2_model_server": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "v2_model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-server", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "ClassifierModel"}, "filename": "v2_model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": []}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "v2_model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-server", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "ClassifierModel"}, "filename": "v2_model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": []}, "url": "", "version": "1.2.0"}}, "onnx_utils": {"latest": {"apiVersion": "v1", "categories": ["utilities", "deep-learning"], "description": "ONNX intigration in MLRun, some utils functions for the ONNX framework, optimizing and converting models from different framework to ONNX using MLRun.", "doc": "", "example": 
"onnx_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0", "name": "onnx_utils", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "with_mlrun": false}}, "filename": "onnx_utils.py", "handler": "to_onnx", "image": "mlrun/mlrun", "kind": "job", "requirements": ["tqdm~=4.67.1", "tensorflow~=2.19.0", "tf_keras~=2.19.0", "torch~=2.8.0", "torchvision~=0.23.0", "onnx~=1.17.0", "onnxruntime~=1.19.2", "onnxoptimizer~=0.3.13", "onnxmltools~=1.13.0", "tf2onnx~=1.16.1", "plotly~=5.23"]}, "url": "", "version": "1.4.0"}, "1.3.0": {"apiVersion": "v1", "categories": ["utilities", "deep-learning"], "description": "ONNX intigration in MLRun, some utils functions for the ONNX framework, optimizing and converting models from different framework to ONNX using MLRun.", "doc": "", "example": "onnx_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.2", "name": "onnx_utils", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "with_mlrun": false}}, "filename": "onnx_utils.py", "handler": "to_onnx", "image": "mlrun/mlrun", "kind": "job", "requirements": ["tqdm~=4.67.1", "tensorflow~=2.19.0", "tf_keras~=2.19.0", "torch~=2.6.0", "torchvision~=0.21.0", "onnx~=1.17.0", "onnxruntime~=1.19.2", "onnxoptimizer~=0.3.13", "onnxmltools~=1.13.0", "tf2onnx~=1.16.1", "plotly~=5.23"]}, "url": "", "version": "1.3.0"}, "1.4.0": {"apiVersion": "v1", "categories": ["utilities", "deep-learning"], "description": "ONNX intigration in MLRun, some utils functions for the ONNX framework, optimizing and converting models from different framework to ONNX using MLRun.", "doc": "", "example": "onnx_utils.ipynb", "generationDate": 
"2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0", "name": "onnx_utils", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "with_mlrun": false}}, "filename": "onnx_utils.py", "handler": "to_onnx", "image": "mlrun/mlrun", "kind": "job", "requirements": ["tqdm~=4.67.1", "tensorflow~=2.19.0", "tf_keras~=2.19.0", "torch~=2.8.0", "torchvision~=0.23.0", "onnx~=1.17.0", "onnxruntime~=1.19.2", "onnxoptimizer~=0.3.13", "onnxmltools~=1.13.0", "tf2onnx~=1.16.1", "plotly~=5.23"]}, "url": "", "version": "1.4.0"}}, "azureml_utils": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Azure AutoML integration in MLRun, including utils functions for training models on Azure AutoML platfrom.", "doc": "", "example": "azureml_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "azureml_utils", "platformVersion": "3.5.3", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "commands": ["apt-get update && apt-get install -y --no-install-recommends git", "apt install -y liblttng-ust0"], "with_mlrun": true}}, "filename": "azureml_utils.py", "handler": "train", "image": "python:3.9-bullseye", "kind": "job", "requirements": ["azureml-core==1.54.0.post1", "azureml-train-automl-client==1.54.0.post1", "plotly~=5.23"]}, "url": "", "version": "1.4.0", "test_valid": true}, "1.4.0": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Azure AutoML integration in MLRun, including utils functions for training models on Azure AutoML platfrom.", "doc": "", "example": "azureml_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": 
[], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "azureml_utils", "platformVersion": "3.5.3", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "commands": ["apt-get update && apt-get install -y --no-install-recommends git", "apt install -y liblttng-ust0"], "with_mlrun": true}}, "filename": "azureml_utils.py", "handler": "train", "image": "python:3.9-bullseye", "kind": "job", "requirements": ["azureml-core==1.54.0.post1", "azureml-train-automl-client==1.54.0.post1", "plotly~=5.23"]}, "url": "", "version": "1.4.0", "test_valid": true}}, "auto_trainer": {"latest": {"apiVersion": "v1", "categories": ["machine-learning", "model-training"], "description": "Automatic train, evaluate and predict functions for the ML frameworks - Scikit-Learn, XGBoost and LightGBM.", "doc": "", "example": "auto_trainer.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "auto_trainer", "platformVersion": "3.5.0", "spec": {"filename": "auto_trainer.py", "handler": "train", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.8.0"}, "1.8.0": {"apiVersion": "v1", "categories": ["machine-learning", "model-training"], "description": "Automatic train, evaluate and predict functions for the ML frameworks - Scikit-Learn, XGBoost and LightGBM.", "doc": "", "example": "auto_trainer.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "auto_trainer", "platformVersion": "3.5.0", "spec": {"filename": "auto_trainer.py", "handler": "train", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.8.0"}}, "azureml_serving": {"latest": {"apiVersion": "v1", "categories": ["machine-learning", "model-serving"], "description": "AzureML serving 
function", "doc": "", "example": "azureml_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "azureml_serving", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "mlrun.frameworks.sklearn.PickleModelServer"}, "filename": "azureml_serving.py", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["azureml-automl-runtime~=1.38.1"]}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["machine-learning", "model-serving"], "description": "AzureML serving function", "doc": "", "example": "azureml_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "azureml_serving", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "mlrun.frameworks.sklearn.PickleModelServer"}, "filename": "azureml_serving.py", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["azureml-automl-runtime~=1.38.1"]}, "url": "", "version": "1.1.0"}}, "batch_inference": {"latest": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Batch inference (also knows as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "batch_inference", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference.py", "handler": "infer", "image": "mlrun/ml-models", "kind": "job", "requirements": null}, "url": "", "version": "1.8.0"}, "1.8.0": {"apiVersion": "v1", 
"categories": ["model-serving"], "description": "Batch inference (also knows as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "batch_inference", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference.py", "handler": "infer", "image": "mlrun/ml-models", "kind": "job", "requirements": null}, "url": "", "version": "1.8.0"}}, "hugging_face_serving": {"latest": {"apiVersion": "v1", "categories": ["genai", "model-serving"], "description": "Generic Hugging Face model server.", "doc": "", "example": "hugging_face_serving.ipynb", "generationDate": "2022-09-05:17-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "hugging_face_serving", "platformVersion": "", "spec": {"customFields": {"default_class": "HuggingFaceModelServer"}, "filename": "hugging_face_serving.py", "handler": "handler", "image": "mlrun/ml-models", "kind": "serving", "requirements": ["transformers==4.21.3", "tensorflow==2.9.2"]}, "url": "", "version": "1.1.0", "test_valid": false}, "1.1.0": {"apiVersion": "v1", "categories": ["genai", "model-serving"], "description": "Generic Hugging Face model server.", "doc": "", "example": "hugging_face_serving.ipynb", "generationDate": "2022-09-05:17-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "hugging_face_serving", "platformVersion": "", "spec": {"customFields": {"default_class": "HuggingFaceModelServer"}, "filename": "hugging_face_serving.py", "handler": "handler", "image": 
"mlrun/ml-models", "kind": "serving", "requirements": ["transformers==4.21.3", "tensorflow==2.9.2"]}, "url": "", "version": "1.1.0", "test_valid": false}}, "transcribe": {"latest": {"apiVersion": "v1", "categories": ["audio", "genai"], "description": "Transcribe audio files into text files", "doc": "", "example": "transcribe.ipynb", "generationDate": "2023-07-13:11-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "transcribe", "platformVersion": "3.5.3", "spec": {"filename": "transcribe.py", "handler": "transcribe", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "tqdm", "torchaudio", "torch", "accelerate"]}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["audio", "genai"], "description": "Transcribe audio files into text files", "doc": "", "example": "transcribe.ipynb", "generationDate": "2023-07-13:11-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "transcribe", "platformVersion": "3.5.3", "spec": {"filename": "transcribe.py", "handler": "transcribe", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "tqdm", "torchaudio", "torch", "accelerate"]}, "url": "", "version": "1.2.0"}}, "question_answering": {"latest": {"apiVersion": "v1", "categories": ["genai"], "description": "GenAI approach of question answering on a given data", "doc": "", "example": "question_answering.ipynb", "generationDate": "2023-08-07:11-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "question_answering", "platformVersion": "3.5.0", "spec": {"filename": "question_answering.py", "handler": "answer_questions", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "torch", "tqdm"]}, "url": "", "version": "0.5.0"}, "0.5.0": 
{"apiVersion": "v1", "categories": ["genai"], "description": "GenAI approach of question answering on a given data", "doc": "", "example": "question_answering.ipynb", "generationDate": "2023-08-07:11-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "question_answering", "platformVersion": "3.5.0", "spec": {"filename": "question_answering.py", "handler": "answer_questions", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "torch", "tqdm"]}, "url": "", "version": "0.5.0"}}, "pii_recognizer": {"latest": {"apiVersion": "v1", "categories": ["data-preparation", "NLP"], "description": "This function is used to recognize PII in a directory of text files", "doc": "", "example": "pii_recognizer.ipynb", "generationDate": "2023-08-15:10-24", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pii-recognizer", "platformVersion": "3.5.3", "spec": {"filename": "pii_recognizer.py", "handler": "recognize_pii", "image": "mlrun/mlrun", "kind": "job", "requirements": ["nltk", "pandas", "presidio-anonymizer", "presidio-analyzer", "torch", "flair@git+https://github.com/flairNLP/flair.git@d4ed67bf663e4066517f00397412510d90043653", "st-annotated-text", "https://huggingface.co/beki/en_spacy_pii_distilbert/resolve/main/en_spacy_pii_distilbert-any-py3-none-any.whl"]}, "url": "", "version": "0.4.0", "test_valid": false}, "0.4.0": {"apiVersion": "v1", "categories": ["data-preparation", "NLP"], "description": "This function is used to recognize PII in a directory of text files", "doc": "", "example": "pii_recognizer.ipynb", "generationDate": "2023-08-15:10-24", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pii-recognizer", "platformVersion": "3.5.3", "spec": {"filename": "pii_recognizer.py", "handler": 
"recognize_pii", "image": "mlrun/mlrun", "kind": "job", "requirements": ["nltk", "pandas", "presidio-anonymizer", "presidio-analyzer", "torch", "flair@git+https://github.com/flairNLP/flair.git@d4ed67bf663e4066517f00397412510d90043653", "st-annotated-text", "https://huggingface.co/beki/en_spacy_pii_distilbert/resolve/main/en_spacy_pii_distilbert-any-py3-none-any.whl"]}, "url": "", "version": "0.4.0", "test_valid": false}}, "batch_inference_v2": {"latest": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Batch inference (also knows as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference_v2.ipynb", "generationDate": "2023-08-07:12-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0-rc51", "name": "batch_inference_v2", "platformVersion": "3.6.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference_v2.py", "handler": "infer", "image": "mlrun/mlrun", "kind": "job", "requirements": null}, "url": "", "version": "2.6.0"}, "2.6.0": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Batch inference (also knows as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference_v2.ipynb", "generationDate": "2023-08-07:12-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0-rc51", "name": "batch_inference_v2", "platformVersion": "3.6.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference_v2.py", "handler": "infer", "image": "mlrun/mlrun", "kind": "job", "requirements": null}, "url": "", "version": "2.6.0"}}, "translate": 
{"latest": {"apiVersion": "v1", "categories": ["genai", "NLP"], "description": "Translate text files from one language to another", "doc": "", "example": "translate.ipynb", "generationDate": "2023-12-05:17-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0-rc41", "name": "translate", "platformVersion": "3.5.3", "spec": {"filename": "translate.py", "handler": "translate", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "sentencepiece", "torch>=2.6", "tqdm"]}, "url": "", "version": "0.3.0", "test_valid": true}, "0.3.0": {"apiVersion": "v1", "categories": ["genai", "NLP"], "description": "Translate text files from one language to another", "doc": "", "example": "translate.ipynb", "generationDate": "2023-12-05:17-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0-rc41", "name": "translate", "platformVersion": "3.5.3", "spec": {"filename": "translate.py", "handler": "translate", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "sentencepiece", "torch>=2.6", "tqdm"]}, "url": "", "version": "0.3.0", "test_valid": true}}, "structured_data_generator": {"latest": {"apiVersion": "v1", "categories": ["data-generation", "genai"], "description": "GenAI approach of generating structured data according to a given schema", "doc": "", "example": "structured_data_generator.ipynb", "generationDate": "2023-12-14:10-50", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0", "name": "structured_data_generator", "platformVersion": "3.5.5", "spec": {"filename": "structured_data_generator.py", "handler": "generate_data", "image": "mlrun/mlrun", "kind": "job", "requirements": ["langchain", "tqdm"]}, "url": "", "version": "1.6.0"}, "1.6.0": {"apiVersion": "v1", "categories": ["data-generation", "genai"], 
"description": "GenAI approach of generating structured data according to a given schema", "doc": "", "example": "structured_data_generator.ipynb", "generationDate": "2023-12-14:10-50", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0", "name": "structured_data_generator", "platformVersion": "3.5.5", "spec": {"filename": "structured_data_generator.py", "handler": "generate_data", "image": "mlrun/mlrun", "kind": "job", "requirements": ["langchain", "tqdm"]}, "url": "", "version": "1.6.0"}}, "text_to_audio_generator": {"latest": {"apiVersion": "v1", "categories": ["data-generation", "audio"], "description": "Generate audio file from text using different speakers", "doc": "", "example": "text_to_audio_generator.ipynb", "generationDate": "2023-12-03:15-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.1", "name": "text_to_audio_generator", "platformVersion": "3.5.3", "spec": {"filename": "text_to_audio_generator.py", "handler": "generate_multi_speakers_audio", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torchaudio", "pydub"]}, "url": "", "version": "1.3.0", "test_valid": false}, "1.3.0": {"apiVersion": "v1", "categories": ["data-generation", "audio"], "description": "Generate audio file from text using different speakers", "doc": "", "example": "text_to_audio_generator.ipynb", "generationDate": "2023-12-03:15-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.1", "name": "text_to_audio_generator", "platformVersion": "3.5.3", "spec": {"filename": "text_to_audio_generator.py", "handler": "generate_multi_speakers_audio", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torchaudio", "pydub"]}, "url": "", "version": "1.3.0", "test_valid": false}}, "silero_vad": {"latest": {"apiVersion": "v1", "categories": 
["deep-learning", "audio"], "description": "Silero VAD (Voice Activity Detection) functions.", "doc": "", "example": "silero_vad.ipynb", "generationDate": "2023-12-03:14-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "silero_vad", "platformVersion": "3.5.3", "spec": {"filename": "silero_vad.py", "handler": "detect_voice", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torch", "torchaudio", "tqdm", "onnxruntime"]}, "url": "", "version": "1.4.0"}, "1.4.0": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "Silero VAD (Voice Activity Detection) functions.", "doc": "", "example": "silero_vad.ipynb", "generationDate": "2023-12-03:14-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "silero_vad", "platformVersion": "3.5.3", "spec": {"filename": "silero_vad.py", "handler": "detect_voice", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torch", "torchaudio", "tqdm", "onnxruntime"]}, "url": "", "version": "1.4.0"}}, "pyannote_audio": {"latest": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "pyannote's speech diarization of audio files", "doc": "", "example": "pyannote_audio.ipynb", "generationDate": "2023-12-03:14-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pyannote-audio", "platformVersion": "3.5.3", "spec": {"filename": "pyannote_audio.py", "handler": "diarize", "image": "mlrun/mlrun-gpu", "kind": "job", "requirements": ["pyannote.audio", "pyannote.core", "torchaudio", "tqdm"]}, "url": "", "version": "1.3.0"}, "1.3.0": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "pyannote's speech diarization of audio files", "doc": "", "example": "pyannote_audio.ipynb", "generationDate": 
"2023-12-03:14-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pyannote-audio", "platformVersion": "3.5.3", "spec": {"filename": "pyannote_audio.py", "handler": "diarize", "image": "mlrun/mlrun-gpu", "kind": "job", "requirements": ["pyannote.audio", "pyannote.core", "torchaudio", "tqdm"]}, "url": "", "version": "1.3.0"}}, "noise_reduction": {"latest": {"apiVersion": "v1", "categories": ["data-preparation", "audio"], "description": "Reduce noise from audio files", "doc": "", "example": "noise_reduction.ipynb", "generationDate": "2024-03-04:17-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "mlrunVersion": "1.7.0", "name": "noise-reduction", "platformVersion": "3.5.3", "spec": {"filename": "noise_reduction.py", "handler": "reduce_noise", "image": "mlrun/mlrun", "kind": "job", "requirements": ["librosa", "noisereduce", "deepfilternet", "torchaudio>=2.1.2"]}, "url": "", "version": "1.1.0", "test_valid": false}, "1.1.0": {"apiVersion": "v1", "categories": ["data-preparation", "audio"], "description": "Reduce noise from audio files", "doc": "", "example": "noise_reduction.ipynb", "generationDate": "2024-03-04:17-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "mlrunVersion": "1.7.0", "name": "noise-reduction", "platformVersion": "3.5.3", "spec": {"filename": "noise_reduction.py", "handler": "reduce_noise", "image": "mlrun/mlrun", "kind": "job", "requirements": ["librosa", "noisereduce", "deepfilternet", "torchaudio>=2.1.2"]}, "url": "", "version": "1.1.0", "test_valid": false}}, "mlflow_utils": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Mlflow model server, and additional utils.", "doc": "", "example": "mlflow_utils.ipynb", "generationDate": "2024-05-23:12-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], 
"marketplaceType": "", "mlrunVersion": "1.10.0", "name": "mlflow_utils", "platformVersion": "", "spec": {"customFields": {"default_class": "MLFlowModelServer"}, "filename": "mlflow_utils.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["mlflow~=3.5"]}, "url": "", "version": "1.2.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["model-serving", "utils"], "description": "Mlflow model server, and additional utils.", "doc": "", "example": "mlflow_utils.ipynb", "generationDate": "2024-05-23:12-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0", "name": "mlflow_utils", "platformVersion": "", "spec": {"customFields": {"default_class": "MLFlowModelServer"}, "filename": "mlflow_utils.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["mlflow~=2.22", "lightgbm", "xgboost"]}, "url": "", "version": "1.1.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Mlflow model server, and additional utils.", "doc": "", "example": "mlflow_utils.ipynb", "generationDate": "2024-05-23:12-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0", "name": "mlflow_utils", "platformVersion": "", "spec": {"customFields": {"default_class": "MLFlowModelServer"}, "filename": "mlflow_utils.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["mlflow~=3.5"]}, "url": "", "version": "1.2.0"}}}, "master": {"tf2_serving": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "tf2 image classification server", "doc": "", "example": "tf2_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "tf2-serving", 
"platformVersion": "3.5.0", "spec": {"filename": "tf2_serving.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": ["requests", "pillow", "tensorflow>=2.1"]}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "tf2 image classification server", "doc": "", "example": "tf2_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "tf2-serving", "platformVersion": "3.5.0", "spec": {"filename": "tf2_serving.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": ["requests", "pillow", "tensorflow>=2.1"]}, "url": "", "version": "1.1.0"}}, "load_dataset": {"latest": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "load a toy dataset from scikit-learn", "doc": "README.md", "example": "load_dataset.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.6.0", "name": "load-dataset", "platformVersion": "3.5.5", "spec": {"filename": "load_dataset.py", "handler": "load_dataset", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "load a toy dataset from scikit-learn", "doc": "README.md", "example": "load_dataset.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.6.0", "name": "load-dataset", "platformVersion": "3.5.5", "spec": {"filename": "load_dataset.py", "handler": "load_dataset", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}}, 
"model_server_tester": {"latest": {"apiVersion": "v1", "categories": ["monitoring", "model-serving"], "description": "test model servers", "doc": "", "example": "model_server_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "model-server-tester", "platformVersion": "3.5.0", "spec": {"filename": "model_server_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["monitoring", "model-serving"], "description": "test model servers", "doc": "", "example": "model_server_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "model-server-tester", "platformVersion": "3.5.0", "spec": {"filename": "model_server_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}}, "feature_selection": {"latest": {"apiVersion": "v1", "categories": ["data-preparation", "machine-learning"], "description": "Select features through multiple Statistical and Model filters", "doc": "", "example": "feature_selection.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc40", "name": "feature-selection", "platformVersion": "3.6.0", "spec": {"filename": "feature_selection.py", "handler": "feature_selection", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.6.0"}, "1.6.0": {"apiVersion": "v1", "categories": ["data-preparation", "machine-learning"], "description": "Select features through multiple Statistical and Model filters", "doc": "", "example": 
"feature_selection.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc40", "name": "feature-selection", "platformVersion": "3.6.0", "spec": {"filename": "feature_selection.py", "handler": "feature_selection", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.6.0"}}, "aggregate": {"latest": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "Rolling aggregation over Metrics and Lables according to specifications", "doc": "", "example": "aggregate.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "aggregate", "platformVersion": "3.5.4", "spec": {"filename": "aggregate.py", "handler": "aggregate", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0"}, "1.4.0": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "Rolling aggregation over Metrics and Lables according to specifications", "doc": "", "example": "aggregate.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "aggregate", "platformVersion": "3.5.4", "spec": {"filename": "aggregate.py", "handler": "aggregate", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0"}}, "describe": {"latest": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "describe and visualizes dataset stats", "doc": "", "example": "describe.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "describe", "platformVersion": "3.5.3", "spec": {"filename": "describe.py", 
"handler": "analyze", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0"}, "1.4.0": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "describe and visualizes dataset stats", "doc": "", "example": "describe.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "describe", "platformVersion": "3.5.3", "spec": {"filename": "describe.py", "handler": "analyze", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0"}}, "model_server": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "model-server", "platformVersion": "3.5.0", "spec": {"filename": "model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": []}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "model-server", "platformVersion": "3.5.0", "spec": {"filename": "model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": []}, "url": "", "version": "1.2.0"}}, "describe_spark": {"latest": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "", "doc": "", "example": "describe_spark.ipynb", "generationDate": 
"2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "describe-spark", "platformVersion": "3.5.0", "spec": {"filename": "describe_spark.py", "handler": "describe_spark", "image": "iguazio/shell:3.0_b5565_20201026062233_wsdf", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "", "doc": "", "example": "describe_spark.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "describe-spark", "platformVersion": "3.5.0", "spec": {"filename": "describe_spark.py", "handler": "describe_spark", "image": "iguazio/shell:3.0_b5565_20201026062233_wsdf", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}}, "gen_class_data": {"latest": {"apiVersion": "v1", "categories": ["data-generation"], "description": "Create a binary classification sample dataset and save.", "doc": "", "example": "gen_class_data.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "gen_class_data", "platformVersion": "3.5.3", "spec": {"filename": "gen_class_data.py", "handler": "gen_class_data", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.3.0"}, "1.3.0": {"apiVersion": "v1", "categories": ["data-generation"], "description": "Create a binary classification sample dataset and save.", "doc": "", "example": "gen_class_data.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "gen_class_data", "platformVersion": "3.5.3", "spec": {"filename": "gen_class_data.py", "handler": 
"gen_class_data", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.3.0"}}, "open_archive": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "Open a file/object archive into a target directory", "doc": "", "example": "open_archive.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc50", "name": "open-archive", "platformVersion": "3.5.0", "spec": {"filename": "open_archive.py", "handler": "open_archive", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "Open a file/object archive into a target directory", "doc": "", "example": "open_archive.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc50", "name": "open-archive", "platformVersion": "3.5.0", "spec": {"filename": "open_archive.py", "handler": "open_archive", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}}, "send_email": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "Send Email messages through SMTP server", "doc": "", "example": "send_email.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.4.1", "name": "send-email", "platformVersion": "3.5.3", "spec": {"filename": "send_email.py", "handler": "send_email", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "Send Email messages through SMTP server", "doc": "", "example": "send_email.ipynb", "generationDate": "2022-08-28:17-25", "hidden": 
false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.4.1", "name": "send-email", "platformVersion": "3.5.3", "spec": {"filename": "send_email.py", "handler": "send_email", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0"}}, "v2_model_tester": {"latest": {"apiVersion": "v1", "categories": ["model-testing", "machine-learning"], "description": "test v2 model servers", "doc": "", "example": "v2_model_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-tester", "platformVersion": "3.5.0", "spec": {"filename": "v2_model_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["model-testing", "machine-learning"], "description": "test v2 model servers", "doc": "", "example": "v2_model_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-tester", "platformVersion": "3.5.0", "spec": {"filename": "v2_model_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}}, "arc_to_parquet": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "retrieve remote archive, open and save as parquet", "doc": "", "example": "arc_to_parquet.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "arc-to-parquet", "platformVersion": "3.5.4", "spec": {"filename": "arc_to_parquet.py", "handler": "arc_to_parquet", "image": "mlrun/mlrun", "kind": "job", 
"requirements": []}, "url": "", "version": "1.5.0"}, "1.5.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "retrieve remote archive, open and save as parquet", "doc": "", "example": "arc_to_parquet.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "arc-to-parquet", "platformVersion": "3.5.4", "spec": {"filename": "arc_to_parquet.py", "handler": "arc_to_parquet", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.5.0"}}, "github_utils": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "add comments to github pull request", "doc": "", "example": "github_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "github-utils", "platformVersion": "3.5.0", "spec": {"filename": "github_utils.py", "handler": "run_summary_comment", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "add comments to github pull request", "doc": "", "example": "github_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "github-utils", "platformVersion": "3.5.0", "spec": {"filename": "github_utils.py", "handler": "run_summary_comment", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0"}}, "v2_model_server": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "v2_model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": 
"Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-server", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "ClassifierModel"}, "filename": "v2_model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": []}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "v2_model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-server", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "ClassifierModel"}, "filename": "v2_model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": []}, "url": "", "version": "1.2.0"}}, "onnx_utils": {"latest": {"apiVersion": "v1", "categories": ["utilities", "deep-learning"], "description": "ONNX intigration in MLRun, some utils functions for the ONNX framework, optimizing and converting models from different framework to ONNX using MLRun.", "doc": "", "example": "onnx_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.2", "name": "onnx_utils", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "with_mlrun": false}}, "filename": "onnx_utils.py", "handler": "to_onnx", "image": "mlrun/mlrun", "kind": "job", "requirements": ["tqdm~=4.67.1", "tensorflow~=2.19.0", "tf_keras~=2.19.0", "torch~=2.6.0", "torchvision~=0.21.0", "onnx~=1.17.0", "onnxruntime~=1.19.2", "onnxoptimizer~=0.3.13", "onnxmltools~=1.13.0", "tf2onnx~=1.16.1", "plotly~=5.23"]}, "url": "", "version": "1.3.0"}, 
"1.3.0": {"apiVersion": "v1", "categories": ["utilities", "deep-learning"], "description": "ONNX intigration in MLRun, some utils functions for the ONNX framework, optimizing and converting models from different framework to ONNX using MLRun.", "doc": "", "example": "onnx_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.2", "name": "onnx_utils", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "with_mlrun": false}}, "filename": "onnx_utils.py", "handler": "to_onnx", "image": "mlrun/mlrun", "kind": "job", "requirements": ["tqdm~=4.67.1", "tensorflow~=2.19.0", "tf_keras~=2.19.0", "torch~=2.6.0", "torchvision~=0.21.0", "onnx~=1.17.0", "onnxruntime~=1.19.2", "onnxoptimizer~=0.3.13", "onnxmltools~=1.13.0", "tf2onnx~=1.16.1", "plotly~=5.23"]}, "url": "", "version": "1.3.0"}}, "azureml_utils": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Azure AutoML integration in MLRun, including utils functions for training models on Azure AutoML platfrom.", "doc": "", "example": "azureml_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "azureml_utils", "platformVersion": "3.5.3", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "commands": ["apt-get update && apt-get install -y --no-install-recommends git", "apt install -y liblttng-ust0"], "with_mlrun": true}}, "filename": "azureml_utils.py", "handler": "train", "image": "python:3.9-bullseye", "kind": "job", "requirements": ["azureml-core==1.54.0.post1", "azureml-train-automl-client==1.54.0.post1", "plotly~=5.23"]}, "url": "", "version": "1.4.0", "test_valid": true}, "1.4.0": {"apiVersion": "v1", "categories": 
["model-serving", "utilities"], "description": "Azure AutoML integration in MLRun, including utils functions for training models on Azure AutoML platfrom.", "doc": "", "example": "azureml_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "azureml_utils", "platformVersion": "3.5.3", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "commands": ["apt-get update && apt-get install -y --no-install-recommends git", "apt install -y liblttng-ust0"], "with_mlrun": true}}, "filename": "azureml_utils.py", "handler": "train", "image": "python:3.9-bullseye", "kind": "job", "requirements": ["azureml-core==1.54.0.post1", "azureml-train-automl-client==1.54.0.post1", "plotly~=5.23"]}, "url": "", "version": "1.4.0", "test_valid": true}}, "auto_trainer": {"latest": {"apiVersion": "v1", "categories": ["machine-learning", "model-training"], "description": "Automatic train, evaluate and predict functions for the ML frameworks - Scikit-Learn, XGBoost and LightGBM.", "doc": "", "example": "auto_trainer.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "auto_trainer", "platformVersion": "3.5.0", "spec": {"filename": "auto_trainer.py", "handler": "train", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.8.0"}, "1.8.0": {"apiVersion": "v1", "categories": ["machine-learning", "model-training"], "description": "Automatic train, evaluate and predict functions for the ML frameworks - Scikit-Learn, XGBoost and LightGBM.", "doc": "", "example": "auto_trainer.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "auto_trainer", 
"platformVersion": "3.5.0", "spec": {"filename": "auto_trainer.py", "handler": "train", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.8.0"}}, "azureml_serving": {"latest": {"apiVersion": "v1", "categories": ["machine-learning", "model-serving"], "description": "AzureML serving function", "doc": "", "example": "azureml_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "azureml_serving", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "mlrun.frameworks.sklearn.PickleModelServer"}, "filename": "azureml_serving.py", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["azureml-automl-runtime~=1.38.1"]}, "url": "", "version": "1.1.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["machine-learning", "model-serving"], "description": "AzureML serving function", "doc": "", "example": "azureml_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "azureml_serving", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "mlrun.frameworks.sklearn.PickleModelServer"}, "filename": "azureml_serving.py", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["azureml-automl-runtime~=1.38.1"]}, "url": "", "version": "1.1.0"}}, "batch_inference": {"latest": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Batch inference (also known as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "batch_inference", 
"platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference.py", "handler": "infer", "image": "mlrun/ml-models", "kind": "job", "requirements": null}, "url": "", "version": "1.8.0"}, "1.8.0": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Batch inference (also known as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "batch_inference", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference.py", "handler": "infer", "image": "mlrun/ml-models", "kind": "job", "requirements": null}, "url": "", "version": "1.8.0"}}, "hugging_face_serving": {"latest": {"apiVersion": "v1", "categories": ["genai", "model-serving"], "description": "Generic Hugging Face model server.", "doc": "", "example": "hugging_face_serving.ipynb", "generationDate": "2022-09-05:17-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "hugging_face_serving", "platformVersion": "", "spec": {"customFields": {"default_class": "HuggingFaceModelServer"}, "filename": "hugging_face_serving.py", "handler": "handler", "image": "mlrun/ml-models", "kind": "serving", "requirements": ["transformers==4.21.3", "tensorflow==2.9.2"]}, "url": "", "version": "1.1.0", "test_valid": false}, "1.1.0": {"apiVersion": "v1", "categories": ["genai", "model-serving"], "description": "Generic Hugging Face model server.", "doc": "", "example": "hugging_face_serving.ipynb", "generationDate": "2022-09-05:17-00", "hidden": false, 
"icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "hugging_face_serving", "platformVersion": "", "spec": {"customFields": {"default_class": "HuggingFaceModelServer"}, "filename": "hugging_face_serving.py", "handler": "handler", "image": "mlrun/ml-models", "kind": "serving", "requirements": ["transformers==4.21.3", "tensorflow==2.9.2"]}, "url": "", "version": "1.1.0", "test_valid": false}}, "question_answering": {"latest": {"apiVersion": "v1", "categories": ["genai"], "description": "GenAI approach of question answering on a given data", "doc": "", "example": "question_answering.ipynb", "generationDate": "2023-08-07:11-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "question_answering", "platformVersion": "3.5.0", "spec": {"filename": "question_answering.py", "handler": "answer_questions", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "torch", "tqdm"]}, "url": "", "version": "0.5.0"}, "0.5.0": {"apiVersion": "v1", "categories": ["genai"], "description": "GenAI approach of question answering on a given data", "doc": "", "example": "question_answering.ipynb", "generationDate": "2023-08-07:11-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "question_answering", "platformVersion": "3.5.0", "spec": {"filename": "question_answering.py", "handler": "answer_questions", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "torch", "tqdm"]}, "url": "", "version": "0.5.0"}}, "transcribe": {"latest": {"apiVersion": "v1", "categories": ["audio", "genai"], "description": "Transcribe audio files into text files", "doc": "", "example": "transcribe.ipynb", "generationDate": "2023-07-13:11-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], 
"marketplaceType": "", "mlrunVersion": "1.7.0", "name": "transcribe", "platformVersion": "3.5.3", "spec": {"filename": "transcribe.py", "handler": "transcribe", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "tqdm", "torchaudio", "torch", "accelerate"]}, "url": "", "version": "1.2.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["audio", "genai"], "description": "Transcribe audio files into text files", "doc": "", "example": "transcribe.ipynb", "generationDate": "2023-07-13:11-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "transcribe", "platformVersion": "3.5.3", "spec": {"filename": "transcribe.py", "handler": "transcribe", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "tqdm", "torchaudio", "torch", "accelerate"]}, "url": "", "version": "1.2.0"}}, "pii_recognizer": {"latest": {"apiVersion": "v1", "categories": ["data-preparation", "NLP"], "description": "This function is used to recognize PII in a directory of text files", "doc": "", "example": "pii_recognizer.ipynb", "generationDate": "2023-08-15:10-24", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pii-recognizer", "platformVersion": "3.5.3", "spec": {"filename": "pii_recognizer.py", "handler": "recognize_pii", "image": "mlrun/mlrun", "kind": "job", "requirements": ["nltk", "pandas", "presidio-anonymizer", "presidio-analyzer", "torch", "flair@git+https://github.com/flairNLP/flair.git@d4ed67bf663e4066517f00397412510d90043653", "st-annotated-text", "https://huggingface.co/beki/en_spacy_pii_distilbert/resolve/main/en_spacy_pii_distilbert-any-py3-none-any.whl"]}, "url": "", "version": "0.4.0", "test_valid": false}, "0.4.0": {"apiVersion": "v1", "categories": ["data-preparation", "NLP"], "description": "This function is used to recognize PII in a directory of text files", "doc": 
"", "example": "pii_recognizer.ipynb", "generationDate": "2023-08-15:10-24", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pii-recognizer", "platformVersion": "3.5.3", "spec": {"filename": "pii_recognizer.py", "handler": "recognize_pii", "image": "mlrun/mlrun", "kind": "job", "requirements": ["nltk", "pandas", "presidio-anonymizer", "presidio-analyzer", "torch", "flair@git+https://github.com/flairNLP/flair.git@d4ed67bf663e4066517f00397412510d90043653", "st-annotated-text", "https://huggingface.co/beki/en_spacy_pii_distilbert/resolve/main/en_spacy_pii_distilbert-any-py3-none-any.whl"]}, "url": "", "version": "0.4.0", "test_valid": false}}, "batch_inference_v2": {"latest": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Batch inference (also known as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference_v2.ipynb", "generationDate": "2023-08-07:12-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0-rc51", "name": "batch_inference_v2", "platformVersion": "3.6.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference_v2.py", "handler": "infer", "image": "mlrun/mlrun", "kind": "job", "requirements": null}, "url": "", "version": "2.6.0"}, "2.6.0": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Batch inference (also known as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference_v2.ipynb", "generationDate": "2023-08-07:12-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0-rc51", "name": 
"batch_inference_v2", "platformVersion": "3.6.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference_v2.py", "handler": "infer", "image": "mlrun/mlrun", "kind": "job", "requirements": null}, "url": "", "version": "2.6.0"}}, "translate": {"latest": {"apiVersion": "v1", "categories": ["genai", "NLP"], "description": "Translate text files from one language to another", "doc": "", "example": "translate.ipynb", "generationDate": "2023-12-05:17-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0-rc41", "name": "translate", "platformVersion": "3.5.3", "spec": {"filename": "translate.py", "handler": "translate", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "sentencepiece", "torch>=2.6", "tqdm"]}, "url": "", "version": "0.3.0", "test_valid": true}, "0.3.0": {"apiVersion": "v1", "categories": ["genai", "NLP"], "description": "Translate text files from one language to another", "doc": "", "example": "translate.ipynb", "generationDate": "2023-12-05:17-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0-rc41", "name": "translate", "platformVersion": "3.5.3", "spec": {"filename": "translate.py", "handler": "translate", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "sentencepiece", "torch>=2.6", "tqdm"]}, "url": "", "version": "0.3.0", "test_valid": true}}, "structured_data_generator": {"latest": {"apiVersion": "v1", "categories": ["data-generation", "genai"], "description": "GenAI approach of generating structured data according to a given schema", "doc": "", "example": "structured_data_generator.ipynb", "generationDate": "2023-12-14:10-50", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0", "name": 
"structured_data_generator", "platformVersion": "3.5.5", "spec": {"filename": "structured_data_generator.py", "handler": "generate_data", "image": "mlrun/mlrun", "kind": "job", "requirements": ["langchain", "tqdm"]}, "url": "", "version": "1.6.0"}, "1.6.0": {"apiVersion": "v1", "categories": ["data-generation", "genai"], "description": "GenAI approach of generating structured data according to a given schema", "doc": "", "example": "structured_data_generator.ipynb", "generationDate": "2023-12-14:10-50", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0", "name": "structured_data_generator", "platformVersion": "3.5.5", "spec": {"filename": "structured_data_generator.py", "handler": "generate_data", "image": "mlrun/mlrun", "kind": "job", "requirements": ["langchain", "tqdm"]}, "url": "", "version": "1.6.0"}}, "text_to_audio_generator": {"latest": {"apiVersion": "v1", "categories": ["data-generation", "audio"], "description": "Generate audio file from text using different speakers", "doc": "", "example": "text_to_audio_generator.ipynb", "generationDate": "2023-12-03:15-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.1", "name": "text_to_audio_generator", "platformVersion": "3.5.3", "spec": {"filename": "text_to_audio_generator.py", "handler": "generate_multi_speakers_audio", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torchaudio", "pydub"]}, "url": "", "version": "1.3.0", "test_valid": false}, "1.3.0": {"apiVersion": "v1", "categories": ["data-generation", "audio"], "description": "Generate audio file from text using different speakers", "doc": "", "example": "text_to_audio_generator.ipynb", "generationDate": "2023-12-03:15-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.1", "name": "text_to_audio_generator", 
"platformVersion": "3.5.3", "spec": {"filename": "text_to_audio_generator.py", "handler": "generate_multi_speakers_audio", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torchaudio", "pydub"]}, "url": "", "version": "1.3.0", "test_valid": false}}, "silero_vad": {"latest": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "Silero VAD (Voice Activity Detection) functions.", "doc": "", "example": "silero_vad.ipynb", "generationDate": "2023-12-03:14-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "silero_vad", "platformVersion": "3.5.3", "spec": {"filename": "silero_vad.py", "handler": "detect_voice", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torch", "torchaudio", "tqdm", "onnxruntime"]}, "url": "", "version": "1.4.0"}, "1.4.0": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "Silero VAD (Voice Activity Detection) functions.", "doc": "", "example": "silero_vad.ipynb", "generationDate": "2023-12-03:14-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "silero_vad", "platformVersion": "3.5.3", "spec": {"filename": "silero_vad.py", "handler": "detect_voice", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torch", "torchaudio", "tqdm", "onnxruntime"]}, "url": "", "version": "1.4.0"}}, "pyannote_audio": {"latest": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "pyannote's speech diarization of audio files", "doc": "", "example": "pyannote_audio.ipynb", "generationDate": "2023-12-03:14-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pyannote-audio", "platformVersion": "3.5.3", "spec": {"filename": "pyannote_audio.py", "handler": "diarize", "image": "mlrun/mlrun-gpu", "kind": 
"job", "requirements": ["pyannote.audio", "pyannote.core", "torchaudio", "tqdm"]}, "url": "", "version": "1.3.0"}, "1.3.0": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "pyannote's speech diarization of audio files", "doc": "", "example": "pyannote_audio.ipynb", "generationDate": "2023-12-03:14-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pyannote-audio", "platformVersion": "3.5.3", "spec": {"filename": "pyannote_audio.py", "handler": "diarize", "image": "mlrun/mlrun-gpu", "kind": "job", "requirements": ["pyannote.audio", "pyannote.core", "torchaudio", "tqdm"]}, "url": "", "version": "1.3.0"}}, "mlflow_utils": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Mlflow model server, and additional utils.", "doc": "", "example": "mlflow_utils.ipynb", "generationDate": "2024-05-23:12-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0", "name": "mlflow_utils", "platformVersion": "", "spec": {"customFields": {"default_class": "MLFlowModelServer"}, "filename": "mlflow_utils.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["mlflow~=3.5"]}, "url": "", "version": "1.2.0"}, "1.1.0": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Mlflow model server, and additional utils.", "doc": "", "example": "mlflow_utils.ipynb", "generationDate": "2024-05-23:12-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0", "name": "mlflow_utils", "platformVersion": "", "spec": {"customFields": {"default_class": "MLFlowModelServer"}, "filename": "mlflow_utils.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["mlflow~=2.22", "lightgbm", "xgboost"]}, "url": "", 
"version": "1.1.0"}, "1.2.0": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Mlflow model server, and additional utils.", "doc": "", "example": "mlflow_utils.ipynb", "generationDate": "2024-05-23:12-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0", "name": "mlflow_utils", "platformVersion": "", "spec": {"customFields": {"default_class": "MLFlowModelServer"}, "filename": "mlflow_utils.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["mlflow~=3.5"]}, "url": "", "version": "1.2.0"}}, "noise_reduction": {"latest": {"apiVersion": "v1", "categories": ["data-preparation", "audio"], "description": "Reduce noise from audio files", "doc": "", "example": "noise_reduction.ipynb", "generationDate": "2024-03-04:17-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "mlrunVersion": "1.7.0", "name": "noise-reduction", "platformVersion": "3.5.3", "spec": {"filename": "noise_reduction.py", "handler": "reduce_noise", "image": "mlrun/mlrun", "kind": "job", "requirements": ["librosa", "noisereduce", "deepfilternet", "torchaudio>=2.1.2"]}, "url": "", "version": "1.1.0", "test_valid": false}, "1.1.0": {"apiVersion": "v1", "categories": ["data-preparation", "audio"], "description": "Reduce noise from audio files", "doc": "", "example": "noise_reduction.ipynb", "generationDate": "2024-03-04:17-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "mlrunVersion": "1.7.0", "name": "noise-reduction", "platformVersion": "3.5.3", "spec": {"filename": "noise_reduction.py", "handler": "reduce_noise", "image": "mlrun/mlrun", "kind": "job", "requirements": ["librosa", "noisereduce", "deepfilternet", "torchaudio>=2.1.2"]}, "url": "", "version": "1.1.0", "test_valid": false}}}}, "modules": {"development": {"count_events": {"latest": {"apiVersion": "v1", "categories": 
["model-serving"], "description": "Count events in each time window", "example": "count_events.ipynb", "generationDate": "2025-09-16:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "count_events", "spec": {"filename": "count_events.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": null}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Count events in each time window", "example": "count_events.ipynb", "generationDate": "2025-09-16:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "count_events", "spec": {"filename": "count_events.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": null}, "version": "1.0.0"}}, "histogram_data_drift": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "structured-ML"], "description": "Model-monitoring application for detecting and visualizing data drift", "example": "histogram_data_drift.ipynb", "generationDate": "2025-11-06:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "histogram_data_drift", "spec": {"filename": "histogram_data_drift.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": ["plotly~=5.23", "pandas"]}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["model-serving", "structured-ML"], "description": "Model-monitoring application for detecting and visualizing data drift", "example": "histogram_data_drift.ipynb", "generationDate": "2025-11-06:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "histogram_data_drift", "spec": {"filename": "histogram_data_drift.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": ["plotly~=5.23", "pandas"]}, "version": "1.0.0"}}, "openai_proxy_app": {"latest": {"apiVersion": "v1", "categories": ["genai"], 
"description": "OpenAI application runtime based on fastapi", "example": "openai_proxy_app.ipynb", "generationDate": "2025-11-11:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "openai_proxy_app", "spec": {"filename": "openai_proxy_app.py", "image": "mlrun/mlrun", "requirements": ["fastapi==0.124.0", "uvicorn[standard]==0.38.0", "gunicorn==23.0.0", "requests==2.32.5"], "kind": "generic"}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["genai"], "description": "OpenAI application runtime based on fastapi", "example": "openai_proxy_app.ipynb", "generationDate": "2025-11-11:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "openai_proxy_app", "spec": {"filename": "openai_proxy_app.py", "image": "mlrun/mlrun", "requirements": ["fastapi==0.124.0", "uvicorn[standard]==0.38.0", "gunicorn==23.0.0", "requests==2.32.5"], "kind": "generic"}, "version": "1.0.0"}}, "evidently_iris": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "structured-ML"], "description": "Demonstrates Evidently integration in MLRun for data quality and drift monitoring using the Iris dataset", "example": "evidently_iris.ipynb", "generationDate": "2025-11-09:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "evidently_iris", "spec": {"filename": "evidently_iris.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": ["scikit-learn~=1.5.2", "evidently~=0.7.5", "pandas", "sniffio~=1.3.0"]}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["model-serving", "structured-ML"], "description": "Demonstrates Evidently integration in MLRun for data quality and drift monitoring using the Iris dataset", "example": "evidently_iris.ipynb", "generationDate": "2025-11-09:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "evidently_iris", "spec": {"filename": 
"evidently_iris.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": ["scikit-learn~=1.5.2", "evidently~=0.7.5", "pandas", "sniffio~=1.3.0"]}, "version": "1.0.0"}}, "agent_deployer": {"latest": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Helper for serving function deploy of AI agents using MLRun", "example": "agent_deployer.ipynb", "generationDate": "2025-12-07", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "agent_deployer", "spec": {"filename": "agent_deployer.py", "image": "mlrun/mlrun", "kind": "generic", "requirements": null}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Helper for serving function deploy of AI agents using MLRun", "example": "agent_deployer.ipynb", "generationDate": "2025-12-07", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "agent_deployer", "spec": {"filename": "agent_deployer.py", "image": "mlrun/mlrun", "kind": "generic", "requirements": null}, "version": "1.0.0"}}, "vllm_module": {"latest": {"apiVersion": "v1", "categories": ["genai"], "description": "Deploys a vLLM OpenAI-compatible LLM server as an MLRun application runtime, with configurable GPU usage, node selection, tensor parallelism, and runtime flags.", "example": "vllm_module.ipynb", "generationDate": "2025-12-17:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "vllm_module", "spec": {"filename": "vllm_module.py", "image": "mlrun/mlrun", "kind": "generic"}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["genai"], "description": "Deploys a vLLM OpenAI-compatible LLM server as an MLRun application runtime, with configurable GPU usage, node selection, tensor parallelism, and runtime flags.", "example": "vllm_module.ipynb", "generationDate": "2025-12-17:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", 
"name": "vllm_module", "spec": {"filename": "vllm_module.py", "image": "mlrun/mlrun", "kind": "generic"}, "version": "1.0.0"}}, "oai_spo": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "structured-ML"], "description": "OAI SPO use case for industrial optimization", "example": "oai_spo.ipynb", "generationDate": "2026-01-26:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.9.2", "name": "oai_spo", "spec": {"filename": "oai_spo.py", "image": "mlrun/mlrun", "kind": "generic", "requirements": null}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["model-serving", "structured-ML"], "description": "OAI SPO use case for industrial optimization", "example": "oai_spo.ipynb", "generationDate": "2026-01-26:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.9.2", "name": "oai_spo", "spec": {"filename": "oai_spo.py", "image": "mlrun/mlrun", "kind": "generic", "requirements": null}, "version": "1.0.0"}}}, "master": {"count_events": {"latest": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Count events in each time window", "example": "count_events.ipynb", "generationDate": "2025-09-16:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "count_events", "spec": {"filename": "count_events.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": null}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Count events in each time window", "example": "count_events.ipynb", "generationDate": "2025-09-16:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "count_events", "spec": {"filename": "count_events.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": null}, "version": "1.0.0"}}, "histogram_data_drift": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "structured-ML"], 
"description": "Model-monitoring application for detecting and visualizing data drift", "example": "histogram_data_drift.ipynb", "generationDate": "2025-11-06:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "histogram_data_drift", "spec": {"filename": "histogram_data_drift.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": ["plotly~=5.23", "pandas"]}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["model-serving", "structured-ML"], "description": "Model-monitoring application for detecting and visualizing data drift", "example": "histogram_data_drift.ipynb", "generationDate": "2025-11-06:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "histogram_data_drift", "spec": {"filename": "histogram_data_drift.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": ["plotly~=5.23", "pandas"]}, "version": "1.0.0"}}, "openai_proxy_app": {"latest": {"apiVersion": "v1", "categories": ["genai"], "description": "OpenAI application runtime based on fastapi", "example": "openai_proxy_app.ipynb", "generationDate": "2025-11-11:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "openai_proxy_app", "spec": {"filename": "openai_proxy_app.py", "image": "mlrun/mlrun", "requirements": ["fastapi==0.124.0", "uvicorn[standard]==0.38.0", "gunicorn==23.0.0", "requests==2.32.5"], "kind": "generic"}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["genai"], "description": "OpenAI application runtime based on fastapi", "example": "openai_proxy_app.ipynb", "generationDate": "2025-11-11:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "openai_proxy_app", "spec": {"filename": "openai_proxy_app.py", "image": "mlrun/mlrun", "requirements": ["fastapi==0.124.0", "uvicorn[standard]==0.38.0", "gunicorn==23.0.0", "requests==2.32.5"], "kind": 
"generic"}, "version": "1.0.0"}}, "evidently_iris": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "structured-ML"], "description": "Demonstrates Evidently integration in MLRun for data quality and drift monitoring using the Iris dataset", "example": "evidently_iris.ipynb", "generationDate": "2025-11-09:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "evidently_iris", "spec": {"filename": "evidently_iris.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": ["scikit-learn~=1.5.2", "evidently~=0.7.5", "pandas", "sniffio~=1.3.0"]}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["model-serving", "structured-ML"], "description": "Demonstrates Evidently integration in MLRun for data quality and drift monitoring using the Iris dataset", "example": "evidently_iris.ipynb", "generationDate": "2025-11-09:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0-rc41", "name": "evidently_iris", "spec": {"filename": "evidently_iris.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": ["scikit-learn~=1.5.2", "evidently~=0.7.5", "pandas", "sniffio~=1.3.0"]}, "version": "1.0.0"}}, "agent_deployer": {"latest": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Helper for serving function deploy of AI agents using MLRun", "example": "agent_deployer.ipynb", "generationDate": "2025-12-07", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "agent_deployer", "spec": {"filename": "agent_deployer.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": null}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Helper for serving function deploy of AI agents using MLRun", "example": "agent_deployer.ipynb", "generationDate": "2025-12-07", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": 
"1.10.0", "name": "agent_deployer", "spec": {"filename": "agent_deployer.py", "image": "mlrun/mlrun", "kind": "monitoring_application", "requirements": null}, "version": "1.0.0"}}, "vllm_module": {"latest": {"apiVersion": "v1", "categories": ["genai"], "description": "Deploys a vLLM OpenAI-compatible LLM server as an MLRun application runtime, with configurable GPU usage, node selection, tensor parallelism, and runtime flags.", "example": "vllm_module.ipynb", "generationDate": "2025-12-17:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "vllm_module", "spec": {"filename": "vllm_module.py", "image": "mlrun/mlrun", "kind": "generic"}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["genai"], "description": "Deploys a vLLM OpenAI-compatible LLM server as an MLRun application runtime, with configurable GPU usage, node selection, tensor parallelism, and runtime flags.", "example": "vllm_module.ipynb", "generationDate": "2025-12-17:12-25", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "vllm_module", "spec": {"filename": "vllm_module.py", "image": "mlrun/mlrun", "kind": "generic"}, "version": "1.0.0"}}}}, "steps": {"development": {"verify_schema": {"latest": {"apiVersion": "v1", "categories": ["data-preparation", "model-serving", "utilities"], "description": "Verifies the event is aligned with the provided schema", "example": "verify_schema.ipynb", "generationDate": "2025-12-29:11-59", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "verify_schema", "className": "VerifySchema", "defaultHandler": null, "spec": {"filename": "verify_schema.py", "image": "mlrun/mlrun", "requirements": null, "kind": "generic"}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["data-preparation", "model-serving", "utilities"], "description": "Verifies the event is aligned with the provided schema", "example": "verify_schema.ipynb", 
"generationDate": "2025-12-29:11-59", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "verify_schema", "className": "VerifySchema", "defaultHandler": null, "spec": {"filename": "verify_schema.py", "image": "mlrun/mlrun", "requirements": null, "kind": "generic"}, "version": "1.0.0"}}}, "master": {"verify_schema": {"latest": {"apiVersion": "v1", "categories": ["data-preparation", "model-serving", "utilities"], "description": "Verifies the event is aligned with the provided schema", "example": "verify_schema.ipynb", "generationDate": "2025-12-29:11-59", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "verify_schema", "className": "VerifySchema", "defaultHandler": null, "spec": {"filename": "verify_schema.py", "image": "mlrun/mlrun", "requirements": null, "kind": "generic"}, "version": "1.0.0"}, "1.0.0": {"apiVersion": "v1", "categories": ["data-preparation", "model-serving", "utilities"], "description": "Verifies the event is aligned with the provided schema", "example": "verify_schema.ipynb", "generationDate": "2025-12-29:11-59", "hidden": false, "labels": {"author": "Iguazio"}, "mlrunVersion": "1.10.0", "name": "verify_schema", "className": "VerifySchema", "defaultHandler": null, "spec": {"filename": "verify_schema.py", "image": "mlrun/mlrun", "requirements": null, "kind": "generic"}, "version": "1.0.0"}}}}} \ No newline at end of file diff --git a/functions/development/catalog.json b/functions/development/catalog.json index 7b29fd5d..b1acbbb0 100644 --- a/functions/development/catalog.json +++ b/functions/development/catalog.json @@ -1 +1 @@ -{"load_dataset": {"latest": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "load a toy dataset from scikit-learn", "doc": "README.md", "example": "load_dataset.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", 
"mlrunVersion": "1.6.0", "name": "load-dataset", "platformVersion": "3.5.5", "spec": {"filename": "load_dataset.py", "handler": "load_dataset", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0", "assets": {"example": "src/load_dataset.ipynb", "source": "src/load_dataset.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.2.0": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "load a toy dataset from scikit-learn", "doc": "README.md", "example": "load_dataset.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.6.0", "name": "load-dataset", "platformVersion": "3.5.5", "spec": {"filename": "load_dataset.py", "handler": "load_dataset", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0", "assets": {"example": "src/load_dataset.ipynb", "source": "src/load_dataset.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "tf2_serving": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "tf2 image classification server", "doc": "", "example": "tf2_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "tf2-serving", "platformVersion": "3.5.0", "spec": {"filename": "tf2_serving.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": ["requests", "pillow", "tensorflow>=2.1"]}, "url": "", "version": "1.1.0", "assets": {"example": "src/tf2_serving.ipynb", "source": "src/tf2_serving.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.1.0": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "tf2 image 
classification server", "doc": "", "example": "tf2_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "tf2-serving", "platformVersion": "3.5.0", "spec": {"filename": "tf2_serving.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": ["requests", "pillow", "tensorflow>=2.1"]}, "url": "", "version": "1.1.0", "assets": {"example": "src/tf2_serving.ipynb", "source": "src/tf2_serving.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "feature_selection": {"latest": {"apiVersion": "v1", "categories": ["data-preparation", "machine-learning"], "description": "Select features through multiple Statistical and Model filters", "doc": "", "example": "feature_selection.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc40", "name": "feature-selection", "platformVersion": "3.6.0", "spec": {"filename": "feature_selection.py", "handler": "feature_selection", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.6.0", "assets": {"example": "src/feature_selection.ipynb", "source": "src/feature_selection.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.6.0": {"apiVersion": "v1", "categories": ["data-preparation", "machine-learning"], "description": "Select features through multiple Statistical and Model filters", "doc": "", "example": "feature_selection.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc40", "name": "feature-selection", "platformVersion": "3.6.0", "spec": {"filename": "feature_selection.py", "handler": "feature_selection", "image": "mlrun/mlrun", "kind": "job", 
"requirements": []}, "url": "", "version": "1.6.0", "assets": {"example": "src/feature_selection.ipynb", "source": "src/feature_selection.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "github_utils": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "add comments to github pull request", "doc": "", "example": "github_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "github-utils", "platformVersion": "3.5.0", "spec": {"filename": "github_utils.py", "handler": "run_summary_comment", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0", "assets": {"example": "src/github_utils.ipynb", "source": "src/github_utils.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.1.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "add comments to github pull request", "doc": "", "example": "github_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "github-utils", "platformVersion": "3.5.0", "spec": {"filename": "github_utils.py", "handler": "run_summary_comment", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0", "assets": {"example": "src/github_utils.ipynb", "source": "src/github_utils.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "auto_trainer": {"latest": {"apiVersion": "v1", "categories": ["machine-learning", "model-training"], "description": "Automatic train, evaluate and predict functions for the ML frameworks - Scikit-Learn, XGBoost and LightGBM.", "doc": "", "example": "auto_trainer.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, 
"maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "auto_trainer", "platformVersion": "3.5.0", "spec": {"filename": "auto_trainer.py", "handler": "train", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.8.0", "assets": {"example": "src/auto_trainer.ipynb", "source": "src/auto_trainer.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.8.0": {"apiVersion": "v1", "categories": ["machine-learning", "model-training"], "description": "Automatic train, evaluate and predict functions for the ML frameworks - Scikit-Learn, XGBoost and LightGBM.", "doc": "", "example": "auto_trainer.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "auto_trainer", "platformVersion": "3.5.0", "spec": {"filename": "auto_trainer.py", "handler": "train", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.8.0", "assets": {"example": "src/auto_trainer.ipynb", "source": "src/auto_trainer.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "model_server": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "model-server", "platformVersion": "3.5.0", "spec": {"filename": "model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": []}, "url": "", "version": "1.2.0", "assets": {"example": "src/model_server.ipynb", "source": "src/model_server.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.2.0": {"apiVersion": "v1", "categories": 
["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "model-server", "platformVersion": "3.5.0", "spec": {"filename": "model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": []}, "url": "", "version": "1.2.0", "assets": {"example": "src/model_server.ipynb", "source": "src/model_server.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "onnx_utils": {"latest": {"apiVersion": "v1", "categories": ["utilities", "deep-learning"], "description": "ONNX intigration in MLRun, some utils functions for the ONNX framework, optimizing and converting models from different framework to ONNX using MLRun.", "doc": "", "example": "onnx_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.2", "name": "onnx_utils", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "with_mlrun": false}}, "filename": "onnx_utils.py", "handler": "to_onnx", "image": "mlrun/mlrun", "kind": "job", "requirements": ["tqdm~=4.67.1", "tensorflow~=2.19.0", "tf_keras~=2.19.0", "torch~=2.6.0", "torchvision~=0.21.0", "onnx~=1.17.0", "onnxruntime~=1.19.2", "onnxoptimizer~=0.3.13", "onnxmltools~=1.13.0", "tf2onnx~=1.16.1", "plotly~=5.23"]}, "url": "", "version": "1.3.0", "assets": {"example": "src/onnx_utils.ipynb", "source": "src/onnx_utils.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.3.0": {"apiVersion": "v1", "categories": ["utilities", "deep-learning"], "description": "ONNX intigration in MLRun, some utils functions for the ONNX framework, 
optimizing and converting models from different framework to ONNX using MLRun.", "doc": "", "example": "onnx_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.2", "name": "onnx_utils", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "with_mlrun": false}}, "filename": "onnx_utils.py", "handler": "to_onnx", "image": "mlrun/mlrun", "kind": "job", "requirements": ["tqdm~=4.67.1", "tensorflow~=2.19.0", "tf_keras~=2.19.0", "torch~=2.6.0", "torchvision~=0.21.0", "onnx~=1.17.0", "onnxruntime~=1.19.2", "onnxoptimizer~=0.3.13", "onnxmltools~=1.13.0", "tf2onnx~=1.16.1", "plotly~=5.23"]}, "url": "", "version": "1.3.0", "assets": {"example": "src/onnx_utils.ipynb", "source": "src/onnx_utils.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "azureml_utils": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Azure AutoML integration in MLRun, including utils functions for training models on Azure AutoML platfrom.", "doc": "", "example": "azureml_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "azureml_utils", "platformVersion": "3.5.3", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "commands": ["apt-get update && apt-get install -y --no-install-recommends git", "apt install -y liblttng-ust0"], "with_mlrun": true}}, "filename": "azureml_utils.py", "handler": "train", "image": "python:3.9-bullseye", "kind": "job", "requirements": ["azureml-core==1.54.0.post1", "azureml-train-automl-client==1.54.0.post1", "plotly~=5.23"]}, "url": "", "version": "1.4.0", "test_valid": true, "assets": {"example": "src/azureml_utils.ipynb", "source": 
"src/azureml_utils.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.4.0": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Azure AutoML integration in MLRun, including utils functions for training models on Azure AutoML platfrom.", "doc": "", "example": "azureml_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "azureml_utils", "platformVersion": "3.5.3", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "commands": ["apt-get update && apt-get install -y --no-install-recommends git", "apt install -y liblttng-ust0"], "with_mlrun": true}}, "filename": "azureml_utils.py", "handler": "train", "image": "python:3.9-bullseye", "kind": "job", "requirements": ["azureml-core==1.54.0.post1", "azureml-train-automl-client==1.54.0.post1", "plotly~=5.23"]}, "url": "", "version": "1.4.0", "test_valid": true, "assets": {"example": "src/azureml_utils.ipynb", "source": "src/azureml_utils.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "model_server_tester": {"latest": {"apiVersion": "v1", "categories": ["monitoring", "model-serving"], "description": "test model servers", "doc": "", "example": "model_server_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "model-server-tester", "platformVersion": "3.5.0", "spec": {"filename": "model_server_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0", "assets": {"example": "src/model_server_tester.ipynb", "source": "src/model_server_tester.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.1.0": {"apiVersion": "v1", "categories": 
["monitoring", "model-serving"], "description": "test model servers", "doc": "", "example": "model_server_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "model-server-tester", "platformVersion": "3.5.0", "spec": {"filename": "model_server_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0", "assets": {"example": "src/model_server_tester.ipynb", "source": "src/model_server_tester.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "gen_class_data": {"latest": {"apiVersion": "v1", "categories": ["data-generation"], "description": "Create a binary classification sample dataset and save.", "doc": "", "example": "gen_class_data.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "gen_class_data", "platformVersion": "3.5.3", "spec": {"filename": "gen_class_data.py", "handler": "gen_class_data", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.3.0", "assets": {"example": "src/gen_class_data.ipynb", "source": "src/gen_class_data.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.3.0": {"apiVersion": "v1", "categories": ["data-generation"], "description": "Create a binary classification sample dataset and save.", "doc": "", "example": "gen_class_data.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "gen_class_data", "platformVersion": "3.5.3", "spec": {"filename": "gen_class_data.py", "handler": "gen_class_data", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.3.0", 
"assets": {"example": "src/gen_class_data.ipynb", "source": "src/gen_class_data.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "describe_spark": {"latest": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "", "doc": "", "example": "describe_spark.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "describe-spark", "platformVersion": "3.5.0", "spec": {"filename": "describe_spark.py", "handler": "describe_spark", "image": "iguazio/shell:3.0_b5565_20201026062233_wsdf", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0", "assets": {"example": "src/describe_spark.ipynb", "source": "src/describe_spark.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.1.0": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "", "doc": "", "example": "describe_spark.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "describe-spark", "platformVersion": "3.5.0", "spec": {"filename": "describe_spark.py", "handler": "describe_spark", "image": "iguazio/shell:3.0_b5565_20201026062233_wsdf", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0", "assets": {"example": "src/describe_spark.ipynb", "source": "src/describe_spark.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "v2_model_server": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "v2_model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-server", 
"platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "ClassifierModel"}, "filename": "v2_model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": []}, "url": "", "version": "1.2.0", "assets": {"example": "src/v2_model_server.ipynb", "source": "src/v2_model_server.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.2.0": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "v2_model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-server", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "ClassifierModel"}, "filename": "v2_model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": []}, "url": "", "version": "1.2.0", "assets": {"example": "src/v2_model_server.ipynb", "source": "src/v2_model_server.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "send_email": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "Send Email messages through SMTP server", "doc": "", "example": "send_email.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.4.1", "name": "send-email", "platformVersion": "3.5.3", "spec": {"filename": "send_email.py", "handler": "send_email", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0", "assets": {"example": "src/send_email.ipynb", "source": "src/send_email.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.2.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "Send Email messages through 
SMTP server", "doc": "", "example": "send_email.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.4.1", "name": "send-email", "platformVersion": "3.5.3", "spec": {"filename": "send_email.py", "handler": "send_email", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0", "assets": {"example": "src/send_email.ipynb", "source": "src/send_email.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "arc_to_parquet": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "retrieve remote archive, open and save as parquet", "doc": "", "example": "arc_to_parquet.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "arc-to-parquet", "platformVersion": "3.5.4", "spec": {"filename": "arc_to_parquet.py", "handler": "arc_to_parquet", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.5.0", "assets": {"example": "src/arc_to_parquet.ipynb", "source": "src/arc_to_parquet.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.5.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "retrieve remote archive, open and save as parquet", "doc": "", "example": "arc_to_parquet.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "arc-to-parquet", "platformVersion": "3.5.4", "spec": {"filename": "arc_to_parquet.py", "handler": "arc_to_parquet", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.5.0", "assets": {"example": "src/arc_to_parquet.ipynb", "source": "src/arc_to_parquet.py", "docs": "static/documentation.html", "function": 
"src/function.yaml"}}}, "open_archive": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "Open a file/object archive into a target directory", "doc": "", "example": "open_archive.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc50", "name": "open-archive", "platformVersion": "3.5.0", "spec": {"filename": "open_archive.py", "handler": "open_archive", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0", "assets": {"example": "src/open_archive.ipynb", "source": "src/open_archive.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.2.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "Open a file/object archive into a target directory", "doc": "", "example": "open_archive.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc50", "name": "open-archive", "platformVersion": "3.5.0", "spec": {"filename": "open_archive.py", "handler": "open_archive", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0", "assets": {"example": "src/open_archive.ipynb", "source": "src/open_archive.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "v2_model_tester": {"latest": {"apiVersion": "v1", "categories": ["model-testing", "machine-learning"], "description": "test v2 model servers", "doc": "", "example": "v2_model_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-tester", "platformVersion": "3.5.0", "spec": {"filename": "v2_model_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", "kind": "job", 
"requirements": []}, "url": "", "version": "1.1.0", "assets": {"example": "src/v2_model_tester.ipynb", "source": "src/v2_model_tester.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.1.0": {"apiVersion": "v1", "categories": ["model-testing", "machine-learning"], "description": "test v2 model servers", "doc": "", "example": "v2_model_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-tester", "platformVersion": "3.5.0", "spec": {"filename": "v2_model_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0", "assets": {"example": "src/v2_model_tester.ipynb", "source": "src/v2_model_tester.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "aggregate": {"latest": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "Rolling aggregation over Metrics and Lables according to specifications", "doc": "", "example": "aggregate.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "aggregate", "platformVersion": "3.5.4", "spec": {"filename": "aggregate.py", "handler": "aggregate", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0", "assets": {"example": "src/aggregate.ipynb", "source": "src/aggregate.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.4.0": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "Rolling aggregation over Metrics and Lables according to specifications", "doc": "", "example": "aggregate.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", 
"mlrunVersion": "1.7.0", "name": "aggregate", "platformVersion": "3.5.4", "spec": {"filename": "aggregate.py", "handler": "aggregate", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0", "assets": {"example": "src/aggregate.ipynb", "source": "src/aggregate.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "describe": {"latest": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "describe and visualizes dataset stats", "doc": "", "example": "describe.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "describe", "platformVersion": "3.5.3", "spec": {"filename": "describe.py", "handler": "analyze", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0", "assets": {"example": "src/describe.ipynb", "source": "src/describe.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.4.0": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "describe and visualizes dataset stats", "doc": "", "example": "describe.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "describe", "platformVersion": "3.5.3", "spec": {"filename": "describe.py", "handler": "analyze", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0", "assets": {"example": "src/describe.ipynb", "source": "src/describe.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "azureml_serving": {"latest": {"apiVersion": "v1", "categories": ["machine-learning", "model-serving"], "description": "AzureML serving function", "doc": "", "example": "azureml_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": 
{"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "azureml_serving", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "mlrun.frameworks.sklearn.PickleModelServer"}, "filename": "azureml_serving.py", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["azureml-automl-runtime~=1.38.1"]}, "url": "", "version": "1.1.0", "assets": {"example": "src/azureml_serving.ipynb", "source": "src/azureml_serving.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.1.0": {"apiVersion": "v1", "categories": ["machine-learning", "model-serving"], "description": "AzureML serving function", "doc": "", "example": "azureml_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "azureml_serving", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "mlrun.frameworks.sklearn.PickleModelServer"}, "filename": "azureml_serving.py", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["azureml-automl-runtime~=1.38.1"]}, "url": "", "version": "1.1.0", "assets": {"example": "src/azureml_serving.ipynb", "source": "src/azureml_serving.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "batch_inference": {"latest": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Batch inference (also knows as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "batch_inference", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": 
"batch_inference.py", "handler": "infer", "image": "mlrun/ml-models", "kind": "job", "requirements": null}, "url": "", "version": "1.8.0", "assets": {"example": "src/batch_inference.ipynb", "source": "src/batch_inference.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.8.0": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Batch inference (also knows as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "batch_inference", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference.py", "handler": "infer", "image": "mlrun/ml-models", "kind": "job", "requirements": null}, "url": "", "version": "1.8.0", "assets": {"example": "src/batch_inference.ipynb", "source": "src/batch_inference.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "hugging_face_serving": {"latest": {"apiVersion": "v1", "categories": ["genai", "model-serving"], "description": "Generic Hugging Face model server.", "doc": "", "example": "hugging_face_serving.ipynb", "generationDate": "2022-09-05:17-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "hugging_face_serving", "platformVersion": "", "spec": {"customFields": {"default_class": "HuggingFaceModelServer"}, "filename": "hugging_face_serving.py", "handler": "handler", "image": "mlrun/ml-models", "kind": "serving", "requirements": ["transformers==4.21.3", "tensorflow==2.9.2"]}, "url": "", "version": "1.1.0", "test_valid": false, "assets": {"example": "src/hugging_face_serving.ipynb", "source": 
"src/hugging_face_serving.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.1.0": {"apiVersion": "v1", "categories": ["genai", "model-serving"], "description": "Generic Hugging Face model server.", "doc": "", "example": "hugging_face_serving.ipynb", "generationDate": "2022-09-05:17-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "hugging_face_serving", "platformVersion": "", "spec": {"customFields": {"default_class": "HuggingFaceModelServer"}, "filename": "hugging_face_serving.py", "handler": "handler", "image": "mlrun/ml-models", "kind": "serving", "requirements": ["transformers==4.21.3", "tensorflow==2.9.2"]}, "url": "", "version": "1.1.0", "test_valid": false, "assets": {"example": "src/hugging_face_serving.ipynb", "source": "src/hugging_face_serving.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "transcribe": {"latest": {"apiVersion": "v1", "categories": ["audio", "genai"], "description": "Transcribe audio files into text files", "doc": "", "example": "transcribe.ipynb", "generationDate": "2023-07-13:11-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "transcribe", "platformVersion": "3.5.3", "spec": {"filename": "transcribe.py", "handler": "transcribe", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "tqdm", "torchaudio", "torch", "accelerate"]}, "url": "", "version": "1.2.0", "assets": {"example": "src/transcribe.ipynb", "source": "src/transcribe.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.2.0": {"apiVersion": "v1", "categories": ["audio", "genai"], "description": "Transcribe audio files into text files", "doc": "", "example": "transcribe.ipynb", "generationDate": "2023-07-13:11-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, 
"maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "transcribe", "platformVersion": "3.5.3", "spec": {"filename": "transcribe.py", "handler": "transcribe", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "tqdm", "torchaudio", "torch", "accelerate"]}, "url": "", "version": "1.2.0", "assets": {"example": "src/transcribe.ipynb", "source": "src/transcribe.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "question_answering": {"latest": {"apiVersion": "v1", "categories": ["genai"], "description": "GenAI approach of question answering on a given data", "doc": "", "example": "question_answering.ipynb", "generationDate": "2023-08-07:11-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "question_answering", "platformVersion": "3.5.0", "spec": {"filename": "question_answering.py", "handler": "answer_questions", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "torch", "tqdm"]}, "url": "", "version": "0.5.0", "assets": {"example": "src/question_answering.ipynb", "source": "src/question_answering.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "0.5.0": {"apiVersion": "v1", "categories": ["genai"], "description": "GenAI approach of question answering on a given data", "doc": "", "example": "question_answering.ipynb", "generationDate": "2023-08-07:11-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "question_answering", "platformVersion": "3.5.0", "spec": {"filename": "question_answering.py", "handler": "answer_questions", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "torch", "tqdm"]}, "url": "", "version": "0.5.0", "assets": {"example": "src/question_answering.ipynb", "source": "src/question_answering.py", "docs": "static/documentation.html", 
"function": "src/function.yaml"}}}, "pii_recognizer": {"latest": {"apiVersion": "v1", "categories": ["data-preparation", "NLP"], "description": "This function is used to recognize PII in a directory of text files", "doc": "", "example": "pii_recognizer.ipynb", "generationDate": "2023-08-15:10-24", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pii-recognizer", "platformVersion": "3.5.3", "spec": {"filename": "pii_recognizer.py", "handler": "recognize_pii", "image": "mlrun/mlrun", "kind": "job", "requirements": ["nltk", "pandas", "presidio-anonymizer", "presidio-analyzer", "torch", "flair@git+https://github.com/flairNLP/flair.git@d4ed67bf663e4066517f00397412510d90043653", "st-annotated-text", "https://huggingface.co/beki/en_spacy_pii_distilbert/resolve/main/en_spacy_pii_distilbert-any-py3-none-any.whl"]}, "url": "", "version": "0.4.0", "test_valid": false, "assets": {"example": "src/pii_recognizer.ipynb", "source": "src/pii_recognizer.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "0.4.0": {"apiVersion": "v1", "categories": ["data-preparation", "NLP"], "description": "This function is used to recognize PII in a directory of text files", "doc": "", "example": "pii_recognizer.ipynb", "generationDate": "2023-08-15:10-24", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pii-recognizer", "platformVersion": "3.5.3", "spec": {"filename": "pii_recognizer.py", "handler": "recognize_pii", "image": "mlrun/mlrun", "kind": "job", "requirements": ["nltk", "pandas", "presidio-anonymizer", "presidio-analyzer", "torch", "flair@git+https://github.com/flairNLP/flair.git@d4ed67bf663e4066517f00397412510d90043653", "st-annotated-text", "https://huggingface.co/beki/en_spacy_pii_distilbert/resolve/main/en_spacy_pii_distilbert-any-py3-none-any.whl"]}, "url": "", "version": "0.4.0", 
"test_valid": false, "assets": {"example": "src/pii_recognizer.ipynb", "source": "src/pii_recognizer.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "batch_inference_v2": {"latest": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Batch inference (also knows as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference_v2.ipynb", "generationDate": "2023-08-07:12-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0-rc51", "name": "batch_inference_v2", "platformVersion": "3.6.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference_v2.py", "handler": "infer", "image": "mlrun/mlrun", "kind": "job", "requirements": null}, "url": "", "version": "2.6.0", "assets": {"example": "src/batch_inference_v2.ipynb", "source": "src/batch_inference_v2.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "2.6.0": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Batch inference (also knows as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference_v2.ipynb", "generationDate": "2023-08-07:12-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0-rc51", "name": "batch_inference_v2", "platformVersion": "3.6.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference_v2.py", "handler": "infer", "image": "mlrun/mlrun", "kind": "job", "requirements": null}, "url": "", "version": "2.6.0", "assets": {"example": "src/batch_inference_v2.ipynb", "source": "src/batch_inference_v2.py", 
"docs": "static/documentation.html", "function": "src/function.yaml"}}}, "translate": {"latest": {"apiVersion": "v1", "categories": ["genai", "NLP"], "description": "Translate text files from one language to another", "doc": "", "example": "translate.ipynb", "generationDate": "2023-12-05:17-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0-rc41", "name": "translate", "platformVersion": "3.5.3", "spec": {"filename": "translate.py", "handler": "translate", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "sentencepiece", "torch>=2.6", "tqdm"]}, "url": "", "version": "0.3.0", "test_valid": true, "assets": {"example": "src/translate.ipynb", "source": "src/translate.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "0.3.0": {"apiVersion": "v1", "categories": ["genai", "NLP"], "description": "Translate text files from one language to another", "doc": "", "example": "translate.ipynb", "generationDate": "2023-12-05:17-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0-rc41", "name": "translate", "platformVersion": "3.5.3", "spec": {"filename": "translate.py", "handler": "translate", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "sentencepiece", "torch>=2.6", "tqdm"]}, "url": "", "version": "0.3.0", "test_valid": true, "assets": {"example": "src/translate.ipynb", "source": "src/translate.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "structured_data_generator": {"latest": {"apiVersion": "v1", "categories": ["data-generation", "genai"], "description": "GenAI approach of generating structured data according to a given schema", "doc": "", "example": "structured_data_generator.ipynb", "generationDate": "2023-12-14:10-50", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], 
"marketplaceType": "", "mlrunVersion": "1.8.0", "name": "structured_data_generator", "platformVersion": "3.5.5", "spec": {"filename": "structured_data_generator.py", "handler": "generate_data", "image": "mlrun/mlrun", "kind": "job", "requirements": ["langchain", "tqdm"]}, "url": "", "version": "1.6.0", "assets": {"example": "src/structured_data_generator.ipynb", "source": "src/structured_data_generator.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.6.0": {"apiVersion": "v1", "categories": ["data-generation", "genai"], "description": "GenAI approach of generating structured data according to a given schema", "doc": "", "example": "structured_data_generator.ipynb", "generationDate": "2023-12-14:10-50", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0", "name": "structured_data_generator", "platformVersion": "3.5.5", "spec": {"filename": "structured_data_generator.py", "handler": "generate_data", "image": "mlrun/mlrun", "kind": "job", "requirements": ["langchain", "tqdm"]}, "url": "", "version": "1.6.0", "assets": {"example": "src/structured_data_generator.ipynb", "source": "src/structured_data_generator.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "text_to_audio_generator": {"latest": {"apiVersion": "v1", "categories": ["data-generation", "audio"], "description": "Generate audio file from text using different speakers", "doc": "", "example": "text_to_audio_generator.ipynb", "generationDate": "2023-12-03:15-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.1", "name": "text_to_audio_generator", "platformVersion": "3.5.3", "spec": {"filename": "text_to_audio_generator.py", "handler": "generate_multi_speakers_audio", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torchaudio", "pydub"]}, "url": "", "version": "1.3.0", "test_valid": false, 
"assets": {"example": "src/text_to_audio_generator.ipynb", "source": "src/text_to_audio_generator.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.3.0": {"apiVersion": "v1", "categories": ["data-generation", "audio"], "description": "Generate audio file from text using different speakers", "doc": "", "example": "text_to_audio_generator.ipynb", "generationDate": "2023-12-03:15-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.1", "name": "text_to_audio_generator", "platformVersion": "3.5.3", "spec": {"filename": "text_to_audio_generator.py", "handler": "generate_multi_speakers_audio", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torchaudio", "pydub"]}, "url": "", "version": "1.3.0", "test_valid": false, "assets": {"example": "src/text_to_audio_generator.ipynb", "source": "src/text_to_audio_generator.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "silero_vad": {"latest": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "Silero VAD (Voice Activity Detection) functions.", "doc": "", "example": "silero_vad.ipynb", "generationDate": "2023-12-03:14-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "silero_vad", "platformVersion": "3.5.3", "spec": {"filename": "silero_vad.py", "handler": "detect_voice", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torch", "torchaudio", "tqdm", "onnxruntime"]}, "url": "", "version": "1.4.0", "assets": {"example": "src/silero_vad.ipynb", "source": "src/silero_vad.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.4.0": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "Silero VAD (Voice Activity Detection) functions.", "doc": "", "example": "silero_vad.ipynb", "generationDate": "2023-12-03:14-30", 
"hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "silero_vad", "platformVersion": "3.5.3", "spec": {"filename": "silero_vad.py", "handler": "detect_voice", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torch", "torchaudio", "tqdm", "onnxruntime"]}, "url": "", "version": "1.4.0", "assets": {"example": "src/silero_vad.ipynb", "source": "src/silero_vad.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "pyannote_audio": {"latest": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "pyannote's speech diarization of audio files", "doc": "", "example": "pyannote_audio.ipynb", "generationDate": "2023-12-03:14-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pyannote-audio", "platformVersion": "3.5.3", "spec": {"filename": "pyannote_audio.py", "handler": "diarize", "image": "mlrun/mlrun-gpu", "kind": "job", "requirements": ["pyannote.audio", "pyannote.core", "torchaudio", "tqdm"]}, "url": "", "version": "1.3.0", "assets": {"example": "src/pyannote_audio.ipynb", "source": "src/pyannote_audio.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.3.0": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "pyannote's speech diarization of audio files", "doc": "", "example": "pyannote_audio.ipynb", "generationDate": "2023-12-03:14-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pyannote-audio", "platformVersion": "3.5.3", "spec": {"filename": "pyannote_audio.py", "handler": "diarize", "image": "mlrun/mlrun-gpu", "kind": "job", "requirements": ["pyannote.audio", "pyannote.core", "torchaudio", "tqdm"]}, "url": "", "version": "1.3.0", "assets": {"example": "src/pyannote_audio.ipynb", "source": 
"src/pyannote_audio.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "noise_reduction": {"latest": {"apiVersion": "v1", "categories": ["data-preparation", "audio"], "description": "Reduce noise from audio files", "doc": "", "example": "noise_reduction.ipynb", "generationDate": "2024-03-04:17-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "mlrunVersion": "1.7.0", "name": "noise-reduction", "platformVersion": "3.5.3", "spec": {"filename": "noise_reduction.py", "handler": "reduce_noise", "image": "mlrun/mlrun", "kind": "job", "requirements": ["librosa", "noisereduce", "deepfilternet", "torchaudio>=2.1.2"]}, "url": "", "version": "1.1.0", "test_valid": false, "assets": {"example": "src/noise_reduction.ipynb", "source": "src/noise_reduction.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.1.0": {"apiVersion": "v1", "categories": ["data-preparation", "audio"], "description": "Reduce noise from audio files", "doc": "", "example": "noise_reduction.ipynb", "generationDate": "2024-03-04:17-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "mlrunVersion": "1.7.0", "name": "noise-reduction", "platformVersion": "3.5.3", "spec": {"filename": "noise_reduction.py", "handler": "reduce_noise", "image": "mlrun/mlrun", "kind": "job", "requirements": ["librosa", "noisereduce", "deepfilternet", "torchaudio>=2.1.2"]}, "url": "", "version": "1.1.0", "test_valid": false, "assets": {"example": "src/noise_reduction.ipynb", "source": "src/noise_reduction.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "mlflow_utils": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Mlflow model server, and additional utils.", "doc": "", "example": "mlflow_utils.ipynb", "generationDate": "2024-05-23:12-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], 
"marketplaceType": "", "mlrunVersion": "1.10.0", "name": "mlflow_utils", "platformVersion": "", "spec": {"customFields": {"default_class": "MLFlowModelServer"}, "filename": "mlflow_utils.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["mlflow~=3.5"]}, "url": "", "version": "1.2.0", "assets": {"example": "src/mlflow_utils.ipynb", "source": "src/mlflow_utils.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.1.0": {"apiVersion": "v1", "categories": ["model-serving", "utils"], "description": "Mlflow model server, and additional utils.", "doc": "", "example": "mlflow_utils.ipynb", "generationDate": "2024-05-23:12-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0", "name": "mlflow_utils", "platformVersion": "", "spec": {"customFields": {"default_class": "MLFlowModelServer"}, "filename": "mlflow_utils.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["mlflow~=2.22", "lightgbm", "xgboost"]}, "url": "", "version": "1.1.0", "assets": {"example": "src/mlflow_utils.ipynb", "source": "src/mlflow_utils.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.2.0": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Mlflow model server, and additional utils.", "doc": "", "example": "mlflow_utils.ipynb", "generationDate": "2024-05-23:12-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0", "name": "mlflow_utils", "platformVersion": "", "spec": {"customFields": {"default_class": "MLFlowModelServer"}, "filename": "mlflow_utils.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["mlflow~=3.5"]}, "url": "", "version": "1.2.0", "assets": {"example": "src/mlflow_utils.ipynb", "source": "src/mlflow_utils.py", "docs": 
"static/documentation.html", "function": "src/function.yaml"}}}} \ No newline at end of file +{"load_dataset": {"latest": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "load a toy dataset from scikit-learn", "doc": "README.md", "example": "load_dataset.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.6.0", "name": "load-dataset", "platformVersion": "3.5.5", "spec": {"filename": "load_dataset.py", "handler": "load_dataset", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0", "assets": {"example": "src/load_dataset.ipynb", "source": "src/load_dataset.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.2.0": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "load a toy dataset from scikit-learn", "doc": "README.md", "example": "load_dataset.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.6.0", "name": "load-dataset", "platformVersion": "3.5.5", "spec": {"filename": "load_dataset.py", "handler": "load_dataset", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0", "assets": {"example": "src/load_dataset.ipynb", "source": "src/load_dataset.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "tf2_serving": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "tf2 image classification server", "doc": "", "example": "tf2_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "tf2-serving", "platformVersion": "3.5.0", "spec": 
{"filename": "tf2_serving.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": ["requests", "pillow", "tensorflow>=2.1"]}, "url": "", "version": "1.1.0", "assets": {"example": "src/tf2_serving.ipynb", "source": "src/tf2_serving.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.1.0": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "tf2 image classification server", "doc": "", "example": "tf2_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "tf2-serving", "platformVersion": "3.5.0", "spec": {"filename": "tf2_serving.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": ["requests", "pillow", "tensorflow>=2.1"]}, "url": "", "version": "1.1.0", "assets": {"example": "src/tf2_serving.ipynb", "source": "src/tf2_serving.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "feature_selection": {"latest": {"apiVersion": "v1", "categories": ["data-preparation", "machine-learning"], "description": "Select features through multiple Statistical and Model filters", "doc": "", "example": "feature_selection.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc40", "name": "feature-selection", "platformVersion": "3.6.0", "spec": {"filename": "feature_selection.py", "handler": "feature_selection", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.6.0", "assets": {"example": "src/feature_selection.ipynb", "source": "src/feature_selection.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.6.0": {"apiVersion": "v1", "categories": ["data-preparation", "machine-learning"], "description": 
"Select features through multiple Statistical and Model filters", "doc": "", "example": "feature_selection.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc40", "name": "feature-selection", "platformVersion": "3.6.0", "spec": {"filename": "feature_selection.py", "handler": "feature_selection", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.6.0", "assets": {"example": "src/feature_selection.ipynb", "source": "src/feature_selection.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "github_utils": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "add comments to github pull request", "doc": "", "example": "github_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "github-utils", "platformVersion": "3.5.0", "spec": {"filename": "github_utils.py", "handler": "run_summary_comment", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0", "assets": {"example": "src/github_utils.ipynb", "source": "src/github_utils.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.1.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "add comments to github pull request", "doc": "", "example": "github_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "github-utils", "platformVersion": "3.5.0", "spec": {"filename": "github_utils.py", "handler": "run_summary_comment", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0", "assets": {"example": "src/github_utils.ipynb", "source": 
"src/github_utils.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "auto_trainer": {"latest": {"apiVersion": "v1", "categories": ["machine-learning", "model-training"], "description": "Automatic train, evaluate and predict functions for the ML frameworks - Scikit-Learn, XGBoost and LightGBM.", "doc": "", "example": "auto_trainer.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "auto_trainer", "platformVersion": "3.5.0", "spec": {"filename": "auto_trainer.py", "handler": "train", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.8.0", "assets": {"example": "src/auto_trainer.ipynb", "source": "src/auto_trainer.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.8.0": {"apiVersion": "v1", "categories": ["machine-learning", "model-training"], "description": "Automatic train, evaluate and predict functions for the ML frameworks - Scikit-Learn, XGBoost and LightGBM.", "doc": "", "example": "auto_trainer.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "auto_trainer", "platformVersion": "3.5.0", "spec": {"filename": "auto_trainer.py", "handler": "train", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.8.0", "assets": {"example": "src/auto_trainer.ipynb", "source": "src/auto_trainer.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "model_server": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": 
[], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "model-server", "platformVersion": "3.5.0", "spec": {"filename": "model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": []}, "url": "", "version": "1.2.0", "assets": {"example": "src/model_server.ipynb", "source": "src/model_server.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.2.0": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "model-server", "platformVersion": "3.5.0", "spec": {"filename": "model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "nuclio:serving", "requirements": []}, "url": "", "version": "1.2.0", "assets": {"example": "src/model_server.ipynb", "source": "src/model_server.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "onnx_utils": {"latest": {"apiVersion": "v1", "categories": ["utilities", "deep-learning"], "description": "ONNX intigration in MLRun, some utils functions for the ONNX framework, optimizing and converting models from different framework to ONNX using MLRun.", "doc": "", "example": "onnx_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0", "name": "onnx_utils", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "with_mlrun": false}}, "filename": "onnx_utils.py", "handler": "to_onnx", "image": "mlrun/mlrun", "kind": "job", "requirements": ["tqdm~=4.67.1", "tensorflow~=2.19.0", "tf_keras~=2.19.0", "torch~=2.8.0", "torchvision~=0.23.0", 
"onnx~=1.17.0", "onnxruntime~=1.19.2", "onnxoptimizer~=0.3.13", "onnxmltools~=1.13.0", "tf2onnx~=1.16.1", "plotly~=5.23"]}, "url": "", "version": "1.4.0", "assets": {"example": "src/onnx_utils.ipynb", "source": "src/onnx_utils.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.3.0": {"apiVersion": "v1", "categories": ["utilities", "deep-learning"], "description": "ONNX intigration in MLRun, some utils functions for the ONNX framework, optimizing and converting models from different framework to ONNX using MLRun.", "doc": "", "example": "onnx_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.2", "name": "onnx_utils", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "with_mlrun": false}}, "filename": "onnx_utils.py", "handler": "to_onnx", "image": "mlrun/mlrun", "kind": "job", "requirements": ["tqdm~=4.67.1", "tensorflow~=2.19.0", "tf_keras~=2.19.0", "torch~=2.6.0", "torchvision~=0.21.0", "onnx~=1.17.0", "onnxruntime~=1.19.2", "onnxoptimizer~=0.3.13", "onnxmltools~=1.13.0", "tf2onnx~=1.16.1", "plotly~=5.23"]}, "url": "", "version": "1.3.0", "assets": {"example": "src/onnx_utils.ipynb", "source": "src/onnx_utils.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.4.0": {"apiVersion": "v1", "categories": ["utilities", "deep-learning"], "description": "ONNX intigration in MLRun, some utils functions for the ONNX framework, optimizing and converting models from different framework to ONNX using MLRun.", "doc": "", "example": "onnx_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0", "name": "onnx_utils", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": 
{"auto_build": true, "with_mlrun": false}}, "filename": "onnx_utils.py", "handler": "to_onnx", "image": "mlrun/mlrun", "kind": "job", "requirements": ["tqdm~=4.67.1", "tensorflow~=2.19.0", "tf_keras~=2.19.0", "torch~=2.8.0", "torchvision~=0.23.0", "onnx~=1.17.0", "onnxruntime~=1.19.2", "onnxoptimizer~=0.3.13", "onnxmltools~=1.13.0", "tf2onnx~=1.16.1", "plotly~=5.23"]}, "url": "", "version": "1.4.0", "assets": {"example": "src/onnx_utils.ipynb", "source": "src/onnx_utils.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "azureml_utils": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Azure AutoML integration in MLRun, including utils functions for training models on Azure AutoML platfrom.", "doc": "", "example": "azureml_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "azureml_utils", "platformVersion": "3.5.3", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "commands": ["apt-get update && apt-get install -y --no-install-recommends git", "apt install -y liblttng-ust0"], "with_mlrun": true}}, "filename": "azureml_utils.py", "handler": "train", "image": "python:3.9-bullseye", "kind": "job", "requirements": ["azureml-core==1.54.0.post1", "azureml-train-automl-client==1.54.0.post1", "plotly~=5.23"]}, "url": "", "version": "1.4.0", "test_valid": true, "assets": {"example": "src/azureml_utils.ipynb", "source": "src/azureml_utils.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.4.0": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Azure AutoML integration in MLRun, including utils functions for training models on Azure AutoML platfrom.", "doc": "", "example": "azureml_utils.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": 
{"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "azureml_utils", "platformVersion": "3.5.3", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": true, "commands": ["apt-get update && apt-get install -y --no-install-recommends git", "apt install -y liblttng-ust0"], "with_mlrun": true}}, "filename": "azureml_utils.py", "handler": "train", "image": "python:3.9-bullseye", "kind": "job", "requirements": ["azureml-core==1.54.0.post1", "azureml-train-automl-client==1.54.0.post1", "plotly~=5.23"]}, "url": "", "version": "1.4.0", "test_valid": true, "assets": {"example": "src/azureml_utils.ipynb", "source": "src/azureml_utils.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "model_server_tester": {"latest": {"apiVersion": "v1", "categories": ["monitoring", "model-serving"], "description": "test model servers", "doc": "", "example": "model_server_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "model-server-tester", "platformVersion": "3.5.0", "spec": {"filename": "model_server_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0", "assets": {"example": "src/model_server_tester.ipynb", "source": "src/model_server_tester.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.1.0": {"apiVersion": "v1", "categories": ["monitoring", "model-serving"], "description": "test model servers", "doc": "", "example": "model_server_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "model-server-tester", "platformVersion": "3.5.0", "spec": {"filename": "model_server_tester.py", "handler": 
"model_server_tester", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0", "assets": {"example": "src/model_server_tester.ipynb", "source": "src/model_server_tester.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "gen_class_data": {"latest": {"apiVersion": "v1", "categories": ["data-generation"], "description": "Create a binary classification sample dataset and save.", "doc": "", "example": "gen_class_data.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "gen_class_data", "platformVersion": "3.5.3", "spec": {"filename": "gen_class_data.py", "handler": "gen_class_data", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.3.0", "assets": {"example": "src/gen_class_data.ipynb", "source": "src/gen_class_data.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.3.0": {"apiVersion": "v1", "categories": ["data-generation"], "description": "Create a binary classification sample dataset and save.", "doc": "", "example": "gen_class_data.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "gen_class_data", "platformVersion": "3.5.3", "spec": {"filename": "gen_class_data.py", "handler": "gen_class_data", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.3.0", "assets": {"example": "src/gen_class_data.ipynb", "source": "src/gen_class_data.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "describe_spark": {"latest": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "", "doc": "", "example": "describe_spark.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": 
"Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "describe-spark", "platformVersion": "3.5.0", "spec": {"filename": "describe_spark.py", "handler": "describe_spark", "image": "iguazio/shell:3.0_b5565_20201026062233_wsdf", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0", "assets": {"example": "src/describe_spark.ipynb", "source": "src/describe_spark.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.1.0": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "", "doc": "", "example": "describe_spark.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "describe-spark", "platformVersion": "3.5.0", "spec": {"filename": "describe_spark.py", "handler": "describe_spark", "image": "iguazio/shell:3.0_b5565_20201026062233_wsdf", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0", "assets": {"example": "src/describe_spark.ipynb", "source": "src/describe_spark.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "v2_model_server": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "v2_model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-server", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "ClassifierModel"}, "filename": "v2_model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": []}, "url": "", "version": "1.2.0", "assets": {"example": "src/v2_model_server.ipynb", "source": "src/v2_model_server.py", "docs": "static/documentation.html", "function": 
"src/function.yaml"}}, "1.2.0": {"apiVersion": "v1", "categories": ["model-serving", "machine-learning"], "description": "generic sklearn model server", "doc": "", "example": "v2_model_server.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio", "framework": "sklearn"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-server", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "ClassifierModel"}, "filename": "v2_model_server.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": []}, "url": "", "version": "1.2.0", "assets": {"example": "src/v2_model_server.ipynb", "source": "src/v2_model_server.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "send_email": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "Send Email messages through SMTP server", "doc": "", "example": "send_email.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.4.1", "name": "send-email", "platformVersion": "3.5.3", "spec": {"filename": "send_email.py", "handler": "send_email", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0", "assets": {"example": "src/send_email.ipynb", "source": "src/send_email.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.2.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "Send Email messages through SMTP server", "doc": "", "example": "send_email.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.4.1", "name": "send-email", "platformVersion": "3.5.3", "spec": {"filename": "send_email.py", "handler": "send_email", "image": "mlrun/mlrun", "kind": "job", 
"requirements": []}, "url": "", "version": "1.2.0", "assets": {"example": "src/send_email.ipynb", "source": "src/send_email.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "arc_to_parquet": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "retrieve remote archive, open and save as parquet", "doc": "", "example": "arc_to_parquet.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "arc-to-parquet", "platformVersion": "3.5.4", "spec": {"filename": "arc_to_parquet.py", "handler": "arc_to_parquet", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.5.0", "assets": {"example": "src/arc_to_parquet.ipynb", "source": "src/arc_to_parquet.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.5.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "retrieve remote archive, open and save as parquet", "doc": "", "example": "arc_to_parquet.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "arc-to-parquet", "platformVersion": "3.5.4", "spec": {"filename": "arc_to_parquet.py", "handler": "arc_to_parquet", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.5.0", "assets": {"example": "src/arc_to_parquet.ipynb", "source": "src/arc_to_parquet.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "open_archive": {"latest": {"apiVersion": "v1", "categories": ["utilities"], "description": "Open a file/object archive into a target directory", "doc": "", "example": "open_archive.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": 
"1.8.0-rc50", "name": "open-archive", "platformVersion": "3.5.0", "spec": {"filename": "open_archive.py", "handler": "open_archive", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0", "assets": {"example": "src/open_archive.ipynb", "source": "src/open_archive.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.2.0": {"apiVersion": "v1", "categories": ["utilities"], "description": "Open a file/object archive into a target directory", "doc": "", "example": "open_archive.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0-rc50", "name": "open-archive", "platformVersion": "3.5.0", "spec": {"filename": "open_archive.py", "handler": "open_archive", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.2.0", "assets": {"example": "src/open_archive.ipynb", "source": "src/open_archive.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "v2_model_tester": {"latest": {"apiVersion": "v1", "categories": ["model-testing", "machine-learning"], "description": "test v2 model servers", "doc": "", "example": "v2_model_tester.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-tester", "platformVersion": "3.5.0", "spec": {"filename": "v2_model_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0", "assets": {"example": "src/v2_model_tester.ipynb", "source": "src/v2_model_tester.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.1.0": {"apiVersion": "v1", "categories": ["model-testing", "machine-learning"], "description": "test v2 model servers", "doc": "", "example": "v2_model_tester.ipynb", 
"generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "v2-model-tester", "platformVersion": "3.5.0", "spec": {"filename": "v2_model_tester.py", "handler": "model_server_tester", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.1.0", "assets": {"example": "src/v2_model_tester.ipynb", "source": "src/v2_model_tester.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "aggregate": {"latest": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "Rolling aggregation over Metrics and Lables according to specifications", "doc": "", "example": "aggregate.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "aggregate", "platformVersion": "3.5.4", "spec": {"filename": "aggregate.py", "handler": "aggregate", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0", "assets": {"example": "src/aggregate.ipynb", "source": "src/aggregate.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.4.0": {"apiVersion": "v1", "categories": ["data-preparation"], "description": "Rolling aggregation over Metrics and Lables according to specifications", "doc": "", "example": "aggregate.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "aggregate", "platformVersion": "3.5.4", "spec": {"filename": "aggregate.py", "handler": "aggregate", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0", "assets": {"example": "src/aggregate.ipynb", "source": "src/aggregate.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, 
"describe": {"latest": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "describe and visualizes dataset stats", "doc": "", "example": "describe.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "describe", "platformVersion": "3.5.3", "spec": {"filename": "describe.py", "handler": "analyze", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0", "assets": {"example": "src/describe.ipynb", "source": "src/describe.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.4.0": {"apiVersion": "v1", "categories": ["data-analysis"], "description": "describe and visualizes dataset stats", "doc": "", "example": "describe.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "describe", "platformVersion": "3.5.3", "spec": {"filename": "describe.py", "handler": "analyze", "image": "mlrun/mlrun", "kind": "job", "requirements": []}, "url": "", "version": "1.4.0", "assets": {"example": "src/describe.ipynb", "source": "src/describe.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "azureml_serving": {"latest": {"apiVersion": "v1", "categories": ["machine-learning", "model-serving"], "description": "AzureML serving function", "doc": "", "example": "azureml_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "azureml_serving", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "mlrun.frameworks.sklearn.PickleModelServer"}, "filename": "azureml_serving.py", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["azureml-automl-runtime~=1.38.1"]}, "url": "", 
"version": "1.1.0", "assets": {"example": "src/azureml_serving.ipynb", "source": "src/azureml_serving.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.1.0": {"apiVersion": "v1", "categories": ["machine-learning", "model-serving"], "description": "AzureML serving function", "doc": "", "example": "azureml_serving.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "azureml_serving", "platformVersion": "3.5.0", "spec": {"customFields": {"default_class": "mlrun.frameworks.sklearn.PickleModelServer"}, "filename": "azureml_serving.py", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["azureml-automl-runtime~=1.38.1"]}, "url": "", "version": "1.1.0", "assets": {"example": "src/azureml_serving.ipynb", "source": "src/azureml_serving.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "batch_inference": {"latest": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Batch inference (also knows as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "batch_inference", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference.py", "handler": "infer", "image": "mlrun/ml-models", "kind": "job", "requirements": null}, "url": "", "version": "1.8.0", "assets": {"example": "src/batch_inference.ipynb", "source": "src/batch_inference.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.8.0": {"apiVersion": "v1", "categories": ["model-serving"], "description": 
"Batch inference (also knows as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference.ipynb", "generationDate": "2022-08-28:17-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "batch_inference", "platformVersion": "3.5.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference.py", "handler": "infer", "image": "mlrun/ml-models", "kind": "job", "requirements": null}, "url": "", "version": "1.8.0", "assets": {"example": "src/batch_inference.ipynb", "source": "src/batch_inference.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "hugging_face_serving": {"latest": {"apiVersion": "v1", "categories": ["genai", "model-serving"], "description": "Generic Hugging Face model server.", "doc": "", "example": "hugging_face_serving.ipynb", "generationDate": "2022-09-05:17-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "hugging_face_serving", "platformVersion": "", "spec": {"customFields": {"default_class": "HuggingFaceModelServer"}, "filename": "hugging_face_serving.py", "handler": "handler", "image": "mlrun/ml-models", "kind": "serving", "requirements": ["transformers==4.21.3", "tensorflow==2.9.2"]}, "url": "", "version": "1.1.0", "test_valid": false, "assets": {"example": "src/hugging_face_serving.ipynb", "source": "src/hugging_face_serving.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.1.0": {"apiVersion": "v1", "categories": ["genai", "model-serving"], "description": "Generic Hugging Face model server.", "doc": "", "example": "hugging_face_serving.ipynb", "generationDate": "2022-09-05:17-00", "hidden": false, "icon": "", "labels": {"author": 
"Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.1.0", "name": "hugging_face_serving", "platformVersion": "", "spec": {"customFields": {"default_class": "HuggingFaceModelServer"}, "filename": "hugging_face_serving.py", "handler": "handler", "image": "mlrun/ml-models", "kind": "serving", "requirements": ["transformers==4.21.3", "tensorflow==2.9.2"]}, "url": "", "version": "1.1.0", "test_valid": false, "assets": {"example": "src/hugging_face_serving.ipynb", "source": "src/hugging_face_serving.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "transcribe": {"latest": {"apiVersion": "v1", "categories": ["audio", "genai"], "description": "Transcribe audio files into text files", "doc": "", "example": "transcribe.ipynb", "generationDate": "2023-07-13:11-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "transcribe", "platformVersion": "3.5.3", "spec": {"filename": "transcribe.py", "handler": "transcribe", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "tqdm", "torchaudio", "torch", "accelerate"]}, "url": "", "version": "1.2.0", "assets": {"example": "src/transcribe.ipynb", "source": "src/transcribe.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.2.0": {"apiVersion": "v1", "categories": ["audio", "genai"], "description": "Transcribe audio files into text files", "doc": "", "example": "transcribe.ipynb", "generationDate": "2023-07-13:11-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "transcribe", "platformVersion": "3.5.3", "spec": {"filename": "transcribe.py", "handler": "transcribe", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "tqdm", "torchaudio", "torch", "accelerate"]}, "url": "", "version": "1.2.0", "assets": {"example": "src/transcribe.ipynb", 
"source": "src/transcribe.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "question_answering": {"latest": {"apiVersion": "v1", "categories": ["genai"], "description": "GenAI approach of question answering on a given data", "doc": "", "example": "question_answering.ipynb", "generationDate": "2023-08-07:11-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "question_answering", "platformVersion": "3.5.0", "spec": {"filename": "question_answering.py", "handler": "answer_questions", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "torch", "tqdm"]}, "url": "", "version": "0.5.0", "assets": {"example": "src/question_answering.ipynb", "source": "src/question_answering.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "0.5.0": {"apiVersion": "v1", "categories": ["genai"], "description": "GenAI approach of question answering on a given data", "doc": "", "example": "question_answering.ipynb", "generationDate": "2023-08-07:11-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "question_answering", "platformVersion": "3.5.0", "spec": {"filename": "question_answering.py", "handler": "answer_questions", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "torch", "tqdm"]}, "url": "", "version": "0.5.0", "assets": {"example": "src/question_answering.ipynb", "source": "src/question_answering.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "pii_recognizer": {"latest": {"apiVersion": "v1", "categories": ["data-preparation", "NLP"], "description": "This function is used to recognize PII in a directory of text files", "doc": "", "example": "pii_recognizer.ipynb", "generationDate": "2023-08-15:10-24", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": 
[], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pii-recognizer", "platformVersion": "3.5.3", "spec": {"filename": "pii_recognizer.py", "handler": "recognize_pii", "image": "mlrun/mlrun", "kind": "job", "requirements": ["nltk", "pandas", "presidio-anonymizer", "presidio-analyzer", "torch", "flair@git+https://github.com/flairNLP/flair.git@d4ed67bf663e4066517f00397412510d90043653", "st-annotated-text", "https://huggingface.co/beki/en_spacy_pii_distilbert/resolve/main/en_spacy_pii_distilbert-any-py3-none-any.whl"]}, "url": "", "version": "0.4.0", "test_valid": false, "assets": {"example": "src/pii_recognizer.ipynb", "source": "src/pii_recognizer.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "0.4.0": {"apiVersion": "v1", "categories": ["data-preparation", "NLP"], "description": "This function is used to recognize PII in a directory of text files", "doc": "", "example": "pii_recognizer.ipynb", "generationDate": "2023-08-15:10-24", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pii-recognizer", "platformVersion": "3.5.3", "spec": {"filename": "pii_recognizer.py", "handler": "recognize_pii", "image": "mlrun/mlrun", "kind": "job", "requirements": ["nltk", "pandas", "presidio-anonymizer", "presidio-analyzer", "torch", "flair@git+https://github.com/flairNLP/flair.git@d4ed67bf663e4066517f00397412510d90043653", "st-annotated-text", "https://huggingface.co/beki/en_spacy_pii_distilbert/resolve/main/en_spacy_pii_distilbert-any-py3-none-any.whl"]}, "url": "", "version": "0.4.0", "test_valid": false, "assets": {"example": "src/pii_recognizer.ipynb", "source": "src/pii_recognizer.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "batch_inference_v2": {"latest": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Batch inference (also knows as prediction) for the common ML frameworks (SciKit-Learn, XGBoost 
and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference_v2.ipynb", "generationDate": "2023-08-07:12-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0-rc51", "name": "batch_inference_v2", "platformVersion": "3.6.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference_v2.py", "handler": "infer", "image": "mlrun/mlrun", "kind": "job", "requirements": null}, "url": "", "version": "2.6.0", "assets": {"example": "src/batch_inference_v2.ipynb", "source": "src/batch_inference_v2.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "2.6.0": {"apiVersion": "v1", "categories": ["model-serving"], "description": "Batch inference (also knows as prediction) for the common ML frameworks (SciKit-Learn, XGBoost and LightGBM) while performing data drift analysis.", "doc": "", "example": "batch_inference_v2.ipynb", "generationDate": "2023-08-07:12-25", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0-rc51", "name": "batch_inference_v2", "platformVersion": "3.6.0", "spec": {"extra_spec": {"allow_empty_resources": true, "build": {"auto_build": false, "with_mlrun": false}}, "filename": "batch_inference_v2.py", "handler": "infer", "image": "mlrun/mlrun", "kind": "job", "requirements": null}, "url": "", "version": "2.6.0", "assets": {"example": "src/batch_inference_v2.ipynb", "source": "src/batch_inference_v2.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "translate": {"latest": {"apiVersion": "v1", "categories": ["genai", "NLP"], "description": "Translate text files from one language to another", "doc": "", "example": "translate.ipynb", "generationDate": "2023-12-05:17-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], 
"marketplaceType": "", "mlrunVersion": "1.10.0-rc41", "name": "translate", "platformVersion": "3.5.3", "spec": {"filename": "translate.py", "handler": "translate", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "sentencepiece", "torch>=2.6", "tqdm"]}, "url": "", "version": "0.3.0", "test_valid": true, "assets": {"example": "src/translate.ipynb", "source": "src/translate.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "0.3.0": {"apiVersion": "v1", "categories": ["genai", "NLP"], "description": "Translate text files from one language to another", "doc": "", "example": "translate.ipynb", "generationDate": "2023-12-05:17-20", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0-rc41", "name": "translate", "platformVersion": "3.5.3", "spec": {"filename": "translate.py", "handler": "translate", "image": "mlrun/mlrun", "kind": "job", "requirements": ["transformers", "sentencepiece", "torch>=2.6", "tqdm"]}, "url": "", "version": "0.3.0", "test_valid": true, "assets": {"example": "src/translate.ipynb", "source": "src/translate.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "structured_data_generator": {"latest": {"apiVersion": "v1", "categories": ["data-generation", "genai"], "description": "GenAI approach of generating structured data according to a given schema", "doc": "", "example": "structured_data_generator.ipynb", "generationDate": "2023-12-14:10-50", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0", "name": "structured_data_generator", "platformVersion": "3.5.5", "spec": {"filename": "structured_data_generator.py", "handler": "generate_data", "image": "mlrun/mlrun", "kind": "job", "requirements": ["langchain", "tqdm"]}, "url": "", "version": "1.6.0", "assets": {"example": "src/structured_data_generator.ipynb", "source": 
"src/structured_data_generator.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.6.0": {"apiVersion": "v1", "categories": ["data-generation", "genai"], "description": "GenAI approach of generating structured data according to a given schema", "doc": "", "example": "structured_data_generator.ipynb", "generationDate": "2023-12-14:10-50", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0", "name": "structured_data_generator", "platformVersion": "3.5.5", "spec": {"filename": "structured_data_generator.py", "handler": "generate_data", "image": "mlrun/mlrun", "kind": "job", "requirements": ["langchain", "tqdm"]}, "url": "", "version": "1.6.0", "assets": {"example": "src/structured_data_generator.ipynb", "source": "src/structured_data_generator.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "text_to_audio_generator": {"latest": {"apiVersion": "v1", "categories": ["data-generation", "audio"], "description": "Generate audio file from text using different speakers", "doc": "", "example": "text_to_audio_generator.ipynb", "generationDate": "2023-12-03:15-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.1", "name": "text_to_audio_generator", "platformVersion": "3.5.3", "spec": {"filename": "text_to_audio_generator.py", "handler": "generate_multi_speakers_audio", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torchaudio", "pydub"]}, "url": "", "version": "1.3.0", "test_valid": false, "assets": {"example": "src/text_to_audio_generator.ipynb", "source": "src/text_to_audio_generator.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.3.0": {"apiVersion": "v1", "categories": ["data-generation", "audio"], "description": "Generate audio file from text using different speakers", "doc": "", "example": "text_to_audio_generator.ipynb", 
"generationDate": "2023-12-03:15-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.1", "name": "text_to_audio_generator", "platformVersion": "3.5.3", "spec": {"filename": "text_to_audio_generator.py", "handler": "generate_multi_speakers_audio", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torchaudio", "pydub"]}, "url": "", "version": "1.3.0", "test_valid": false, "assets": {"example": "src/text_to_audio_generator.ipynb", "source": "src/text_to_audio_generator.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "silero_vad": {"latest": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "Silero VAD (Voice Activity Detection) functions.", "doc": "", "example": "silero_vad.ipynb", "generationDate": "2023-12-03:14-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "silero_vad", "platformVersion": "3.5.3", "spec": {"filename": "silero_vad.py", "handler": "detect_voice", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torch", "torchaudio", "tqdm", "onnxruntime"]}, "url": "", "version": "1.4.0", "assets": {"example": "src/silero_vad.ipynb", "source": "src/silero_vad.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.4.0": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "Silero VAD (Voice Activity Detection) functions.", "doc": "", "example": "silero_vad.ipynb", "generationDate": "2023-12-03:14-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "silero_vad", "platformVersion": "3.5.3", "spec": {"filename": "silero_vad.py", "handler": "detect_voice", "image": "mlrun/mlrun", "kind": "job", "requirements": ["torch", "torchaudio", "tqdm", "onnxruntime"]}, "url": "", "version": "1.4.0", 
"assets": {"example": "src/silero_vad.ipynb", "source": "src/silero_vad.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "pyannote_audio": {"latest": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "pyannote's speech diarization of audio files", "doc": "", "example": "pyannote_audio.ipynb", "generationDate": "2023-12-03:14-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pyannote-audio", "platformVersion": "3.5.3", "spec": {"filename": "pyannote_audio.py", "handler": "diarize", "image": "mlrun/mlrun-gpu", "kind": "job", "requirements": ["pyannote.audio", "pyannote.core", "torchaudio", "tqdm"]}, "url": "", "version": "1.3.0", "assets": {"example": "src/pyannote_audio.ipynb", "source": "src/pyannote_audio.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.3.0": {"apiVersion": "v1", "categories": ["deep-learning", "audio"], "description": "pyannote's speech diarization of audio files", "doc": "", "example": "pyannote_audio.ipynb", "generationDate": "2023-12-03:14-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.7.0", "name": "pyannote-audio", "platformVersion": "3.5.3", "spec": {"filename": "pyannote_audio.py", "handler": "diarize", "image": "mlrun/mlrun-gpu", "kind": "job", "requirements": ["pyannote.audio", "pyannote.core", "torchaudio", "tqdm"]}, "url": "", "version": "1.3.0", "assets": {"example": "src/pyannote_audio.ipynb", "source": "src/pyannote_audio.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "noise_reduction": {"latest": {"apiVersion": "v1", "categories": ["data-preparation", "audio"], "description": "Reduce noise from audio files", "doc": "", "example": "noise_reduction.ipynb", "generationDate": "2024-03-04:17-30", "hidden": false, "icon": "", "labels": {"author": 
"Iguazio"}, "maintainers": [], "mlrunVersion": "1.7.0", "name": "noise-reduction", "platformVersion": "3.5.3", "spec": {"filename": "noise_reduction.py", "handler": "reduce_noise", "image": "mlrun/mlrun", "kind": "job", "requirements": ["librosa", "noisereduce", "deepfilternet", "torchaudio>=2.1.2"]}, "url": "", "version": "1.1.0", "test_valid": false, "assets": {"example": "src/noise_reduction.ipynb", "source": "src/noise_reduction.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.1.0": {"apiVersion": "v1", "categories": ["data-preparation", "audio"], "description": "Reduce noise from audio files", "doc": "", "example": "noise_reduction.ipynb", "generationDate": "2024-03-04:17-30", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "mlrunVersion": "1.7.0", "name": "noise-reduction", "platformVersion": "3.5.3", "spec": {"filename": "noise_reduction.py", "handler": "reduce_noise", "image": "mlrun/mlrun", "kind": "job", "requirements": ["librosa", "noisereduce", "deepfilternet", "torchaudio>=2.1.2"]}, "url": "", "version": "1.1.0", "test_valid": false, "assets": {"example": "src/noise_reduction.ipynb", "source": "src/noise_reduction.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}, "mlflow_utils": {"latest": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Mlflow model server, and additional utils.", "doc": "", "example": "mlflow_utils.ipynb", "generationDate": "2024-05-23:12-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0", "name": "mlflow_utils", "platformVersion": "", "spec": {"customFields": {"default_class": "MLFlowModelServer"}, "filename": "mlflow_utils.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["mlflow~=3.5"]}, "url": "", "version": "1.2.0", "assets": {"example": "src/mlflow_utils.ipynb", "source": 
"src/mlflow_utils.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.1.0": {"apiVersion": "v1", "categories": ["model-serving", "utils"], "description": "Mlflow model server, and additional utils.", "doc": "", "example": "mlflow_utils.ipynb", "generationDate": "2024-05-23:12-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.8.0", "name": "mlflow_utils", "platformVersion": "", "spec": {"customFields": {"default_class": "MLFlowModelServer"}, "filename": "mlflow_utils.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["mlflow~=2.22", "lightgbm", "xgboost"]}, "url": "", "version": "1.1.0", "assets": {"example": "src/mlflow_utils.ipynb", "source": "src/mlflow_utils.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}, "1.2.0": {"apiVersion": "v1", "categories": ["model-serving", "utilities"], "description": "Mlflow model server, and additional utils.", "doc": "", "example": "mlflow_utils.ipynb", "generationDate": "2024-05-23:12-00", "hidden": false, "icon": "", "labels": {"author": "Iguazio"}, "maintainers": [], "marketplaceType": "", "mlrunVersion": "1.10.0", "name": "mlflow_utils", "platformVersion": "", "spec": {"customFields": {"default_class": "MLFlowModelServer"}, "filename": "mlflow_utils.py", "handler": "handler", "image": "mlrun/mlrun", "kind": "serving", "requirements": ["mlflow~=3.5"]}, "url": "", "version": "1.2.0", "assets": {"example": "src/mlflow_utils.ipynb", "source": "src/mlflow_utils.py", "docs": "static/documentation.html", "function": "src/function.yaml"}}}} \ No newline at end of file diff --git a/functions/development/onnx_utils/1.4.0/src/function.yaml b/functions/development/onnx_utils/1.4.0/src/function.yaml new file mode 100644 index 00000000..091002cd --- /dev/null +++ b/functions/development/onnx_utils/1.4.0/src/function.yaml @@ -0,0 +1,189 @@ +metadata: + name: onnx-utils + tag: 
'' + categories: + - utilities + - deep-learning +kind: job +spec: + entry_points: + tf_keras_to_onnx: + name: tf_keras_to_onnx + parameters: + - name: model_handler + doc: An initialized TFKerasModelHandler with a loaded model to convert to + ONNX. + - name: onnx_model_name + type: str + doc: The name to use to log the converted ONNX model. If not given, the given + `model_name` will be used with an additional suffix `_onnx`. Defaulted to + None. + default: null + - name: optimize_model + type: bool + doc: Whether or not to optimize the ONNX model using 'onnxoptimizer' before + saving the model. Defaulted to True. + default: true + - name: input_signature + type: List[Tuple[Tuple[int], str]] + doc: 'A list of the input layers shape and data type properties. Expected + to receive a list where each element is an input layer tuple. An input layer + tuple is a tuple of: [0] = Layer''s shape, a tuple of integers. [1] = Layer''s + data type, a mlrun.data_types.ValueType string. If None, the input signature + will be tried to be read from the model artifact. Defaulted to None.' + default: null + doc: Convert a TF.Keras model to an ONNX model and log it back to MLRun as a + new model object. + lineno: 26 + has_varargs: false + has_kwargs: false + pytorch_to_onnx: + name: pytorch_to_onnx + parameters: + - name: model_handler + doc: An initialized PyTorchModelHandler with a loaded model to convert to + ONNX. + - name: onnx_model_name + type: str + doc: The name to use to log the converted ONNX model. If not given, the given + `model_name` will be used with an additional suffix `_onnx`. Defaulted to + None. + default: null + - name: optimize_model + type: bool + doc: Whether or not to optimize the ONNX model using 'onnxoptimizer' before + saving the model. Defaulted to True. + default: true + - name: input_signature + type: List[Tuple[Tuple[int, ], str]] + doc: 'A list of the input layers shape and data type properties. 
Expected + to receive a list where each element is an input layer tuple. An input layer + tuple is a tuple of: [0] = Layer''s shape, a tuple of integers. [1] = Layer''s + data type, a mlrun.data_types.ValueType string. If None, the input signature + will be tried to be read from the model artifact. Defaulted to None.' + default: null + - name: input_layers_names + type: List[str] + doc: 'List of names to assign to the input nodes of the graph in order. All + of the other parameters (inner layers) can be set as well by passing additional + names in the list. The order is by the order of the parameters in the model. + If None, the inputs will be read from the handler''s inputs. If its also + None, it is defaulted to: "input_0", "input_1", ...' + default: null + - name: output_layers_names + type: List[str] + doc: 'List of names to assign to the output nodes of the graph in order. If + None, the outputs will be read from the handler''s outputs. If its also + None, it is defaulted to: "output_0" (for multiple outputs, this parameter + must be provided).' + default: null + - name: dynamic_axes + type: Dict[str, Dict[int, str]] + doc: 'If part of the input / output shape is dynamic, like (batch_size, 3, + 32, 32) you can specify it by giving a dynamic axis to the input / output + layer by its name as follows: { "input layer name": {0: "batch_size"}, "output + layer name": {0: "batch_size"}, } If provided, the ''is_batched'' flag will + be ignored. Defaulted to None.' + default: null + - name: is_batched + type: bool + doc: Whether to include a batch size as the first axis in every input and + output layer. Defaulted to True. Will be ignored if 'dynamic_axes' is provided. + default: true + doc: Convert a PyTorch model to an ONNX model and log it back to MLRun as a + new model object. 
+ lineno: 81 + has_varargs: false + has_kwargs: false + to_onnx: + name: to_onnx + parameters: + - name: context + type: MLClientCtx + doc: The MLRun function execution context + - name: model_path + type: str + doc: The model path store object. + - name: load_model_kwargs + type: dict + doc: Keyword arguments to pass to the `AutoMLRun.load_model` method. + default: null + - name: onnx_model_name + type: str + doc: The name to use to log the converted ONNX model. If not given, the given + `model_name` will be used with an additional suffix `_onnx`. Defaulted to + None. + default: null + - name: optimize_model + type: bool + doc: Whether to optimize the ONNX model using 'onnxoptimizer' before saving + the model. Defaulted to True. + default: true + - name: framework_kwargs + type: Dict[str, Any] + doc: Additional arguments each framework may require to convert to ONNX. To + get the doc string of the desired framework onnx conversion function, pass + "help". + default: null + doc: Convert the given model to an ONNX model. + lineno: 160 + has_varargs: false + has_kwargs: false + optimize: + name: optimize + parameters: + - name: context + type: MLClientCtx + doc: The MLRun function execution context. + - name: model_path + type: str + doc: Path to the ONNX model object. + - name: handler_init_kwargs + type: dict + doc: Keyword arguments to pass to the `ONNXModelHandler` init method preloading. + default: null + - name: optimizations + type: List[str] + doc: List of possible optimizations. To see what optimizations are available, + pass "help". If None, all the optimizations will be used. Defaulted to None. + default: null + - name: fixed_point + type: bool + doc: Optimize the weights using fixed point. Defaulted to False. + default: false + - name: optimized_model_name + type: str + doc: The name of the optimized model. If None, the original model will be + overridden. Defaulted to None. + default: null + doc: Optimize the given ONNX model. 
+ lineno: 224 + has_varargs: false + has_kwargs: false + image: '' + default_handler: to_onnx + allow_empty_resources: true + command: '' + disable_auto_mount: false + description: ONNX integration in MLRun, some utils functions for the ONNX framework, + optimizing and converting models from different frameworks to ONNX using MLRun. + build: + functionSourceCode: # Copyright 2019 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Callable, Dict, List, Tuple

import mlrun


class _ToONNXConversions:
    """
    An ONNX conversion functions library class.
    """

    @staticmethod
    def tf_keras_to_onnx(
        model_handler,
        onnx_model_name: str = None,
        optimize_model: bool = True,
        input_signature: List[Tuple[Tuple[int], str]] = None,
    ):
        """
        Convert a TF.Keras model to an ONNX model and log it back to MLRun as a new model object.

        :param model_handler:   An initialized TFKerasModelHandler with a loaded model to convert to ONNX.
        :param onnx_model_name: The name to use to log the converted ONNX model. If not given, the given `model_name`
                                will be used with an additional suffix `_onnx`. Defaulted to None.
        :param optimize_model:  Whether or not to optimize the ONNX model using 'onnxoptimizer' before saving the model.
                                Defaulted to True.
        :param input_signature: A list of the input layers shape and data type properties. Expected to receive a list
                                where each element is an input layer tuple. An input layer tuple is a tuple of:
                                [0] = Layer's shape, a tuple of integers.
                                [1] = Layer's data type, a mlrun.data_types.ValueType string.
                                If None, the input signature will be tried to be read from the model artifact. Defaulted
                                to None.

        :raises MLRunRuntimeError: If `input_signature` is None and it could not be read from the model automatically.
        """
        # Import the framework and handler (deferred so the module can be imported without TF installed):
        import tensorflow as tf
        from mlrun.frameworks.tf_keras import TFKerasUtils

        # Check the given 'input_signature' parameter:
        if input_signature is None:
            # Read the inputs from the model:
            try:
                model_handler.read_inputs_from_model()
            except Exception as error:
                # Chain the original exception (PEP 3134) so the root cause stays visible in the traceback:
                raise mlrun.errors.MLRunRuntimeError(
                    f"Please provide the 'input_signature' parameter. The function tried reading the input layers "
                    f"information automatically but failed with the following error: {error}"
                ) from error
        else:
            # Parse the 'input_signature' parameter into TF tensor specs:
            input_signature = [
                tf.TensorSpec(
                    shape=shape,
                    dtype=TFKerasUtils.convert_value_type_to_tf_dtype(
                        value_type=value_type
                    ),
                )
                for (shape, value_type) in input_signature
            ]

        # Convert to ONNX:
        model_handler.to_onnx(
            model_name=onnx_model_name,
            input_signature=input_signature,
            optimize=optimize_model,
        )

    @staticmethod
    def pytorch_to_onnx(
        model_handler,
        onnx_model_name: str = None,
        optimize_model: bool = True,
        input_signature: List[Tuple[Tuple[int, ...], str]] = None,
        input_layers_names: List[str] = None,
        output_layers_names: List[str] = None,
        dynamic_axes: Dict[str, Dict[int, str]] = None,
        is_batched: bool = True,
    ):
        """
        Convert a PyTorch model to an ONNX model and log it back to MLRun as a new model object.

        :param model_handler:       An initialized PyTorchModelHandler with a loaded model to convert to ONNX.
        :param onnx_model_name:     The name to use to log the converted ONNX model. If not given, the given
                                    `model_name` will be used with an additional suffix `_onnx`. Defaulted to None.
        :param optimize_model:      Whether or not to optimize the ONNX model using 'onnxoptimizer' before saving the
                                    model. Defaulted to True.
        :param input_signature:     A list of the input layers shape and data type properties. Expected to receive a
                                    list where each element is an input layer tuple. An input layer tuple is a tuple of:
                                    [0] = Layer's shape, a tuple of integers.
                                    [1] = Layer's data type, a mlrun.data_types.ValueType string.
                                    If None, the input signature will be tried to be read from the model artifact.
                                    Defaulted to None.
        :param input_layers_names:  List of names to assign to the input nodes of the graph in order. All of the other
                                    parameters (inner layers) can be set as well by passing additional names in the
                                    list. The order is by the order of the parameters in the model. If None, the inputs
                                    will be read from the handler's inputs. If its also None, it is defaulted to:
                                    "input_0", "input_1", ...
        :param output_layers_names: List of names to assign to the output nodes of the graph in order. If None, the
                                    outputs will be read from the handler's outputs. If its also None, it is defaulted
                                    to: "output_0" (for multiple outputs, this parameter must be provided).
        :param dynamic_axes:        If part of the input / output shape is dynamic, like (batch_size, 3, 32, 32) you can
                                    specify it by giving a dynamic axis to the input / output layer by its name as
                                    follows: {
                                        "input layer name": {0: "batch_size"},
                                        "output layer name": {0: "batch_size"},
                                    }
                                    If provided, the 'is_batched' flag will be ignored. Defaulted to None.
        :param is_batched:          Whether to include a batch size as the first axis in every input and output layer.
                                    Defaulted to True. Will be ignored if 'dynamic_axes' is provided.
        """
        # Import the framework and handler (deferred so the module can be imported without torch installed):
        import torch
        from mlrun.frameworks.pytorch import PyTorchUtils

        # Parse the 'input_signature' parameter into a tuple of zero tensors (sample inputs for tracing):
        if input_signature is not None:
            input_signature = tuple(
                torch.zeros(
                    size=shape,
                    dtype=PyTorchUtils.convert_value_type_to_torch_dtype(
                        value_type=value_type
                    ),
                )
                for (shape, value_type) in input_signature
            )

        # Convert to ONNX:
        model_handler.to_onnx(
            model_name=onnx_model_name,
            input_sample=input_signature,
            optimize=optimize_model,
            input_layers_names=input_layers_names,
            output_layers_names=output_layers_names,
            dynamic_axes=dynamic_axes,
            is_batched=is_batched,
        )


# Map for getting the conversion function according to the provided framework:
_CONVERSION_MAP: Dict[str, Callable] = {
    "tensorflow.keras": _ToONNXConversions.tf_keras_to_onnx,
    "torch": _ToONNXConversions.pytorch_to_onnx,
}


def to_onnx(
    context: mlrun.MLClientCtx,
    model_path: str,
    load_model_kwargs: dict = None,
    onnx_model_name: str = None,
    optimize_model: bool = True,
    framework_kwargs: Dict[str, Any] = None,
):
    """
    Convert the given model to an ONNX model.

    :param context:           The MLRun function execution context
    :param model_path:        The model path store object.
    :param load_model_kwargs: Keyword arguments to pass to the `AutoMLRun.load_model` method.
    :param onnx_model_name:   The name to use to log the converted ONNX model. If not given, the given `model_name` will
                              be used with an additional suffix `_onnx`. Defaulted to None.
    :param optimize_model:    Whether to optimize the ONNX model using 'onnxoptimizer' before saving the model.
                              Defaulted to True.
    :param framework_kwargs:  Additional arguments each framework may require to convert to ONNX. To get the doc string
                              of the desired framework onnx conversion function, pass "help".

    :raises MLRunInvalidArgumentError: If the model's framework has no ONNX conversion, or if the conversion function
                                       was called with wrong keyword arguments.
    """
    from mlrun.frameworks.auto_mlrun.auto_mlrun import AutoMLRun

    # Get a model handler of the required framework:
    load_model_kwargs = load_model_kwargs or {}
    model_handler = AutoMLRun.load_model(
        model_path=model_path, context=context, **load_model_kwargs
    )

    # Get the model's framework:
    framework = model_handler.FRAMEWORK_NAME

    # Use the conversion map to get the specific framework to onnx conversion:
    if framework not in _CONVERSION_MAP:
        raise mlrun.errors.MLRunInvalidArgumentError(
            f"The following framework: '{framework}', has no ONNX conversion."
        )
    conversion_function = _CONVERSION_MAP[framework]

    # Check if needed to print the function's doc string ("help" is passed):
    if framework_kwargs == "help":
        print(conversion_function.__doc__)
        return

    # Set the default empty framework kwargs if needed (consistent with 'load_model_kwargs' above):
    framework_kwargs = framework_kwargs or {}

    # Run the conversion:
    try:
        conversion_function(
            model_handler=model_handler,
            onnx_model_name=onnx_model_name,
            optimize_model=optimize_model,
            **framework_kwargs,
        )
    except TypeError as exception:
        # Chain the original TypeError (PEP 3134) so the bad keyword argument stays visible in the traceback:
        raise mlrun.errors.MLRunInvalidArgumentError(
            f"ERROR: A TypeError exception was raised during the conversion:\n{exception}. "
            f"Please read the {framework} framework conversion function doc string by passing 'help' in the "
            f"'framework_kwargs' dictionary parameter."
        ) from exception


def optimize(
    context: mlrun.MLClientCtx,
    model_path: str,
    handler_init_kwargs: dict = None,
    optimizations: List[str] = None,
    fixed_point: bool = False,
    optimized_model_name: str = None,
):
    """
    Optimize the given ONNX model.

    :param context:              The MLRun function execution context.
    :param model_path:           Path to the ONNX model object.
    :param handler_init_kwargs:  Keyword arguments to pass to the `ONNXModelHandler` init method preloading.
    :param optimizations:        List of possible optimizations. To see what optimizations are available, pass "help".
                                 If None, all the optimizations will be used. Defaulted to None.
    :param fixed_point:          Optimize the weights using fixed point. Defaulted to False.
    :param optimized_model_name: The name of the optimized model. If None, the original model will be overridden.
                                 Defaulted to None.
    """
    # Deferred imports so the module can be loaded without the ONNX stack installed:
    import onnxoptimizer
    from mlrun.frameworks.onnx import ONNXModelHandler

    # Passing "help" lists the available optimization passes instead of running:
    if optimizations == "help":
        passes = onnxoptimizer.get_available_passes()
        print("The available optimizations are:\n* " + "\n* ".join(passes))
        return

    # Build the model handler, defaulting to no extra init kwargs:
    if handler_init_kwargs is None:
        handler_init_kwargs = {}
    model_handler = ONNXModelHandler(
        model_path=model_path, context=context, **handler_init_kwargs
    )

    # Load the ONNX model into the handler:
    model_handler.load()

    # Apply the requested optimization passes:
    model_handler.optimize(optimizations=optimizations, fixed_point=fixed_point)

    # Rename the model when a new name was requested, otherwise the original is overridden:
    if optimized_model_name is not None:
        model_handler.set_model_name(model_name=optimized_model_name)

    # Log the optimized model back to MLRun:
    model_handler.log()
 + base_image: mlrun/mlrun + with_mlrun: false + auto_build: true + requirements: + - tqdm~=4.67.1 + - tensorflow~=2.19.0 + - tf_keras~=2.19.0 + - torch~=2.8.0 + - torchvision~=0.23.0 + - onnx~=1.17.0 + - onnxruntime~=1.19.2 + - onnxoptimizer~=0.3.13 + - onnxmltools~=1.13.0 + - tf2onnx~=1.16.1 + - plotly~=5.23 + origin_filename: '' + code_origin: '' +verbose: false diff --git a/functions/development/onnx_utils/1.4.0/src/item.yaml b/functions/development/onnx_utils/1.4.0/src/item.yaml new file mode 100644 index 00000000..5f129389 --- /dev/null +++ b/functions/development/onnx_utils/1.4.0/src/item.yaml @@ -0,0 +1,42 @@ +apiVersion: v1 +categories: +- utilities +- deep-learning +description: ONNX intigration in MLRun, some utils functions for the ONNX framework, + optimizing and converting models from different framework to ONNX using MLRun. +doc: '' +example: onnx_utils.ipynb +generationDate: 2022-08-28:17-25 +hidden: false +icon: '' +labels: + author: Iguazio +maintainers: [] +marketplaceType: '' +mlrunVersion: 1.10.0 +name: onnx_utils +platformVersion: 3.5.0 +spec: + extra_spec: + allow_empty_resources: true + build: + auto_build: true + with_mlrun: false + filename: onnx_utils.py + handler: to_onnx + image: mlrun/mlrun + kind: job + requirements: + - tqdm~=4.67.1 + - tensorflow~=2.19.0 + - tf_keras~=2.19.0 + - torch~=2.8.0 + - torchvision~=0.23.0 + - onnx~=1.17.0 + - onnxruntime~=1.19.2 + - onnxoptimizer~=0.3.13 + - onnxmltools~=1.13.0 + - tf2onnx~=1.16.1 + - plotly~=5.23 +url: '' +version: 1.4.0 diff --git a/functions/development/onnx_utils/1.4.0/src/onnx_utils.ipynb b/functions/development/onnx_utils/1.4.0/src/onnx_utils.ipynb new file mode 100644 index 00000000..14c810fa --- /dev/null +++ b/functions/development/onnx_utils/1.4.0/src/onnx_utils.ipynb @@ -0,0 +1,1172 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# ONNX Utils\n", + "\n", + "A collection of ONNX utils in one MLRun function. 
The function includes the following handlers:\n", + "\n", + "1. [to_onnx](#handler1) - Convert your model into `onnx` format.\n", + "2. [optimize](#handler2) - Perform ONNX optimizations using `onnxmodeloptimizer` on a given ONNX model." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## 1. to_onnx\n", + "\n", + "### 1.1. Docs\n", + "Convert the given model to an ONNX model.\n", + "\n", + "#### Parameters:\n", + "* **`context`**: `mlrun.MLClientCtx` - The MLRun function execution context\n", + "* **`model_path`**: `str` - The model path store object.\n", + "* **`onnx_model_name`**: `str = None` - The name to use to log the converted ONNX model. If not given, the given `model_name` will be used with an additional suffix `_onnx`. Defaulted to None.\n", + "* **`optimize_model`**: `bool = True` - Whether to optimize the ONNX model using 'onnxoptimizer' before saving the model. Defaulted to True.\n", + "* **`framework`**: `str = None` - The model's framework. If None, it will be read from the 'framework' label of the model artifact provided. Defaulted to None.\n", + "* **`framework_kwargs`**: `Dict[str, Any] = None` - Additional arguments each framework may require in order to convert to ONNX. *To get the doc string of the desired framework onnx conversion function, **pass \"help\"**.*" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "#### Supported keyword arguments (`framework_kwargs`) per framework:\n", + "`tensorflow.keras`:\n", + "* **`input_signature`**: `List[Tuple[Tuple[int], str]] = None` - A list of the input layers shape and data type properties. Expected to receive a list where each element is an input layer tuple. 
An input layer tuple is a tuple of:\n", + " * [0] = Layer's shape, a tuple of integers.\n", + " * [1] = Layer's data type, a mlrun.data_types.ValueType string.\n", + "\n", + " If None, the input signature will be tried to be read automatically before converting to ONNX or from the model artifact if available. Defaulted to None.\n", + "\n", + "`torch`:\n", + "* **`input_signature`**: `List[Tuple[Tuple[int], str]] = None` - A list of the input layers shape and data type properties. Expected to receive a list where each element is an input layer tuple. An input layer tuple is a tuple of:\n", + " * [0] = Layer's shape, a tuple of integers.\n", + " * [1] = Layer's data type, a mlrun.data_types.ValueType string.\n", + "\n", + " If None, the input signature will be read from the model artifact if available. Defaulted to None.\n", + "* **`input_layers_names`**: `List[str] = None` - List of names to assign to the input nodes of the graph in order. All of the other parameters (inner layers) can be set as well by passing additional names in the list. The order is by the order of the parameters in the model. If None, the inputs will be read from the handler's inputs. If its also None, it is defaulted to: \"input_0\", \"input_1\", ...\n", + "* **`output_layers_names`**: `List[str] = None` - List of names to assign to the output nodes of the graph in order. If None, the outputs will be read from the handler's outputs. If its also None, it is defaulted to: \"output_0\" (for multiple outputs, this parameter must be provided).\n", + "* **`param dynamic_axes`**: `Dict[str, Dict[int, str]] = None` - If part of the input / output shape is dynamic, like (batch_size, 3, 32, 32) you can specify it by giving a dynamic axis to the input / output layer by its name as follows:\n", + "```python\n", + "{\n", + " \"input layer name\": {0: \"batch_size\"},\n", + " \"output layer name\": {0: \"batch_size\"},\n", + "}\n", + "```\n", + "If provided, the 'is_batched' flag will be ignored. 
Defaulted to None.\n", + "* **`is_batched`**: `bool = True` - Whether to include a batch size as the first axis in every input and output layer. Defaulted to True. Will be ignored if 'dynamic_axes' is provided." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "### 1.2. Demo\n", + "\n", + "We will use the `PyTorch` framework, a `MobileNetV2` as our model and we will convert it to ONNX using the `to_onnx` handler.\n", + "\n", + "1.2.1. First we will set the artifact path for our model to be saved in and choose the models names:" + ] + }, + { + "cell_type": "code", + "metadata": { + "pycharm": { + "name": "#%%\n" + }, + "ExecuteTime": { + "end_time": "2026-02-10T14:13:28.256582Z", + "start_time": "2026-02-10T14:13:28.250886Z" + } + }, + "source": [ + "import os\n", + "import tempfile\n", + "# Use a temporary directory for model artifacts (safe cleanup):\n", + "ARTIFACT_PATH = tempfile.mkdtemp()\n", + "os.environ[\"MLRUN_ARTIFACT_PATH\"] = ARTIFACT_PATH\n", + "\n", + "# Project name:\n", + "PROJECT_NAME = \"onnx-utils\"\n", + "\n", + "# Choose our model's name:\n", + "MODEL_NAME = \"mobilenetv2\"\n", + "\n", + "# Choose our ONNX version model's name:\n", + "ONNX_MODEL_NAME = \"onnx_mobilenetv2\"\n", + "\n", + "# Choose our optimized ONNX version model's name:\n", + "OPTIMIZED_ONNX_MODEL_NAME = \"optimized_onnx_mobilenetv2\"" + ], + "outputs": [], + "execution_count": 1 + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "1.2.2. 
Download the model from `torchvision.models` and log it with MLRun's `PyTorchModelHandler`:" + ] + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-10T14:00:15.032590Z", + "start_time": "2026-02-10T14:00:15.031196Z" + } + }, + "cell_type": "code", + "source": "# mlrun: start-code", + "outputs": [], + "execution_count": 8 + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-10T14:14:00.992001Z", + "start_time": "2026-02-10T14:13:33.115438Z" + } + }, + "cell_type": "code", + "source": [ + "import torchvision\n", + "\n", + "import mlrun\n", + "from mlrun.frameworks.pytorch import PyTorchModelHandler\n", + "\n", + "\n", + "def get_model(context: mlrun.MLClientCtx, model_name: str):\n", + " # Download the MobileNetV2 model:\n", + " model = torchvision.models.mobilenet_v2()\n", + "\n", + " # Initialize a model handler for logging the model:\n", + " model_handler = PyTorchModelHandler(\n", + " model_name=model_name,\n", + " model=model,\n", + " model_class=\"mobilenet_v2\",\n", + " modules_map={\"torchvision.models\": \"mobilenet_v2\"},\n", + " context=context,\n", + " )\n", + "\n", + " # Log the model:\n", + " model_handler.log()" + ], + "outputs": [], + "execution_count": 2 + }, + { + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-10T14:00:15.040221Z", + "start_time": "2026-02-10T14:00:15.038886Z" + } + }, + "cell_type": "code", + "source": "# mlrun: end-code", + "outputs": [], + "execution_count": 10 + }, + { + "cell_type": "code", + "metadata": { + "pycharm": { + "name": "#%%\n" + }, + "ExecuteTime": { + "end_time": "2026-02-10T14:14:34.429194Z", + "start_time": "2026-02-10T14:14:07.906087Z" + } + }, + "source": [ + "import mlrun\n", + "\n", + "# Create or get the MLRun project:\n", + "project = mlrun.get_or_create_project(PROJECT_NAME, context=\"./\")\n", + "\n", + "# Create the function parsing this notebook's code using 'code_to_function':\n", + "get_model_function = mlrun.code_to_function(\n", + " 
name=\"get_mobilenetv2\",\n", + " project=PROJECT_NAME,\n", + " kind=\"job\",\n", + " image=\"mlrun/ml-models\"\n", + ")\n", + "\n", + "# Run the function to log the model:\n", + "get_model_run = get_model_function.run(\n", + " handler=\"get_model\",\n", + " output_path=ARTIFACT_PATH,\n", + " params={\n", + " \"model_name\": MODEL_NAME\n", + " },\n", + " local=True\n", + ")" + ], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "> 2026-02-10 16:14:24,932 [info] Created and saved project: {\"context\":\"./\",\"from_template\":null,\"name\":\"onnx-utils\",\"overwrite\":false,\"save\":true}\n", + "> 2026-02-10 16:14:24,933 [info] Project created successfully: {\"project_name\":\"onnx-utils\",\"stored_in_db\":true}\n", + "> 2026-02-10 16:14:31,659 [info] Storing function: {\"db\":null,\"name\":\"get-mobilenetv2-get-model\",\"uid\":\"7b9d1b54375b44e191d73685a382c910\"}\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ], + "text/html": [ + "\n", + "
\n", + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
projectuiditerstartendstatekindnamelabelsinputsparametersresultsartifact_uris
onnx-utils0Feb 10 14:14:32NaTcompletedrunget-mobilenetv2-get-model
v3io_user=omerm
kind=local
owner=omerm
host=M-KCX16N69X3
model_name=mobilenetv2
mobilenetv2_modules_map.json=store://artifacts/onnx-utils/#0@7b9d1b54375b44e191d73685a382c910
model=store://models/onnx-utils/mobilenetv2#0@7b9d1b54375b44e191d73685a382c910^e0393bc5b070fd55cc57cecb94160ce412498e0f
\n", + "
\n", + "
\n", + "
\n", + " Title\n", + " ×\n", + "
\n", + " \n", + "
\n", + "
\n" + ] + }, + "metadata": {}, + "output_type": "display_data", + "jetTransient": { + "display_id": null + } + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ], + "text/html": [ + " > to track results use the .show() or .logs() methods or click here to open in UI" + ] + }, + "metadata": {}, + "output_type": "display_data", + "jetTransient": { + "display_id": null + } + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "> 2026-02-10 16:14:34,427 [info] Run execution finished: {\"name\":\"get-mobilenetv2-get-model\",\"status\":\"completed\"}\n" + ] + } + ], + "execution_count": 3 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "1.2.4. Import the `onnx_utils` MLRun function and run it:" + ] + }, + { + "cell_type": "code", + "metadata": { + "pycharm": { + "name": "#%%\n" + }, + "ExecuteTime": { + "end_time": "2026-02-10T14:14:53.863947Z", + "start_time": "2026-02-10T14:14:48.088349Z" + } + }, + "source": "# Import the ONNX function from the marketplace:\nonnx_utils_function = mlrun.import_function(\"hub://onnx_utils\", project=PROJECT_NAME)\n\n# Construct the model path from the run directory structure:\nmodel_path = os.path.join(ARTIFACT_PATH, \"get-mobilenetv2-get-model\", \"0\", \"model\")\nmodules_map_path = os.path.join(ARTIFACT_PATH, \"get-mobilenetv2-get-model\", \"0\", \"mobilenetv2_modules_map.json.json\")\n\n# Run the function to convert our model to ONNX:\nto_onnx_run = onnx_utils_function.run(\n handler=\"to_onnx\",\n output_path=ARTIFACT_PATH,\n params={\n \"model_name\": MODEL_NAME,\n \"model_path\": model_path,\n \"load_model_kwargs\": {\n \"model_name\": MODEL_NAME,\n \"model_class\": \"mobilenet_v2\",\n \"modules_map\": modules_map_path,\n },\n \"onnx_model_name\": ONNX_MODEL_NAME,\n \"optimize_model\": False, # <- For optimizing it later in the demo, we mark the flag as False\n \"framework_kwargs\": {\"input_signature\": [((32, 3, 224, 
224), \"float32\")]},\n },\n local=True\n)", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "> 2026-02-10 16:14:48,519 [info] Storing function: {\"db\":null,\"name\":\"onnx-utils-to-onnx\",\"uid\":\"95deb2c7dbf0460291efb25c48eeebd7\"}\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ], + "text/html": [ + "\n", + "
\n", + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
projectuiditerstartendstatekindnamelabelsinputsparametersresultsartifact_uris
onnx-utils0Feb 10 14:14:49NaTcompletedrunonnx-utils-to-onnx
v3io_user=omerm
kind=local
owner=omerm
host=M-KCX16N69X3
model_name=mobilenetv2
model_path=/var/folders/rn/q8gs952n26982d36y50w_2rw0000gp/T/tmpvs5qvbxr/get-mobilenetv2-get-model/0/model
load_model_kwargs={'model_name': 'mobilenetv2', 'model_class': 'mobilenet_v2', 'modules_map': '/var/folders/rn/q8gs952n26982d36y50w_2rw0000gp/T/tmpvs5qvbxr/get-mobilenetv2-get-model/0/mobilenetv2_modules_map.json.json'}
onnx_model_name=onnx_mobilenetv2
optimize_model=False
framework_kwargs={'input_signature': [((32, 3, 224, 224), 'float32')]}
model=store://models/onnx-utils/onnx_mobilenetv2#0@95deb2c7dbf0460291efb25c48eeebd7^03e4286da44d015cf5465d43e809a504d15f7f63
\n", + "
\n", + "
\n", + "
\n", + " Title\n", + " ×\n", + "
\n", + " \n", + "
\n", + "
\n" + ] + }, + "metadata": {}, + "output_type": "display_data", + "jetTransient": { + "display_id": null + } + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ], + "text/html": [ + " > to track results use the .show() or .logs() methods or click here to open in UI" + ] + }, + "metadata": {}, + "output_type": "display_data", + "jetTransient": { + "display_id": null + } + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "> 2026-02-10 16:14:53,862 [info] Run execution finished: {\"name\":\"onnx-utils-to-onnx\",\"status\":\"completed\"}\n" + ] + } + ], + "execution_count": 4 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "1.2.5. Now we verify the ONNX model was created:" + ] + }, + { + "cell_type": "code", + "metadata": { + "pycharm": { + "name": "#%%\n" + }, + "ExecuteTime": { + "end_time": "2026-02-10T14:14:56.820411Z", + "start_time": "2026-02-10T14:14:56.817892Z" + } + }, + "source": [ + "import os\n", + "\n", + "onnx_model_file = os.path.join(ARTIFACT_PATH, \"onnx-utils-to-onnx\", \"0\", \"model\", \"onnx_mobilenetv2.onnx\")\n", + "assert os.path.isfile(onnx_model_file), f\"ONNX model not found at {onnx_model_file}\"\n", + "print(f\"ONNX model created at: {onnx_model_file}\")" + ], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ONNX model created at: /var/folders/rn/q8gs952n26982d36y50w_2rw0000gp/T/tmpvs5qvbxr/onnx-utils-to-onnx/0/model/onnx_mobilenetv2.onnx\n" + ] + } + ], + "execution_count": 5 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "\n", + "## 2. optimize\n", + "\n", + "### 2.1. 
Docs\n", + "Optimize the given ONNX model.\n", + "\n", + "#### Parameters:\n", + "* **`context`**: `mlrun.MLClientCtx` - The MLRun function execution context\n", + "* **`model_path`**: `str` - The model path store object.\n", + "* **`optimizations`**: `List[str] = None` - List of possible optimizations. *To see what optimizations are available, **pass \"help\"**.* If None, all of the optimizations will be used. Defaulted to None.\n", + "* **`fixed_point`**: `bool = False` - Optimize the weights using fixed point. Defaulted to False.\n", + "* **`optimized_model_name`**: `str = None` - The name of the optimized model. If None, the original model will be overridden. Defaulted to None." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2.2. Demo\n", + "\n", + "We will use our converted model from the last example and optimize it.\n", + "\n", + "2.2.1. We will call now the `optimize` handler:" + ] + }, + { + "cell_type": "code", + "metadata": { + "pycharm": { + "name": "#%%\n" + }, + "ExecuteTime": { + "end_time": "2026-02-10T14:15:03.415997Z", + "start_time": "2026-02-10T14:15:00.637332Z" + } + }, + "source": "# Construct the ONNX model path from the run directory structure:\nonnx_model_path = os.path.join(ARTIFACT_PATH, \"onnx-utils-to-onnx\", \"0\", \"model\")\n\nonnx_utils_function.run(\n handler=\"optimize\",\n output_path=ARTIFACT_PATH,\n params={\n \"model_path\": onnx_model_path,\n \"handler_init_kwargs\": {\"model_name\": ONNX_MODEL_NAME},\n \"optimized_model_name\": OPTIMIZED_ONNX_MODEL_NAME,\n },\n local=True\n)", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "> 2026-02-10 16:15:00,639 [info] Storing function: {\"db\":null,\"name\":\"onnx-utils-optimize\",\"uid\":\"0c30d7af94814dcabde8152a1951fb5d\"}\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ], + "text/html": [ + "\n", + "
\n", + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
projectuiditerstartendstatekindnamelabelsinputsparametersresultsartifact_uris
onnx-utils0Feb 10 14:15:01NaTcompletedrunonnx-utils-optimize
v3io_user=omerm
kind=local
owner=omerm
host=M-KCX16N69X3
model_path=/var/folders/rn/q8gs952n26982d36y50w_2rw0000gp/T/tmpvs5qvbxr/onnx-utils-to-onnx/0/model
handler_init_kwargs={'model_name': 'onnx_mobilenetv2'}
optimized_model_name=optimized_onnx_mobilenetv2
model=store://models/onnx-utils/optimized_onnx_mobilenetv2#0@0c30d7af94814dcabde8152a1951fb5d^599547984e83a664dc1c2708607d06731edb5ac2
\n", + "
\n", + "
\n", + "
\n", + " Title\n", + " ×\n", + "
\n", + " \n", + "
\n", + "
\n" + ] + }, + "metadata": {}, + "output_type": "display_data", + "jetTransient": { + "display_id": null + } + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ], + "text/html": [ + " > to track results use the .show() or .logs() methods or click here to open in UI" + ] + }, + "metadata": {}, + "output_type": "display_data", + "jetTransient": { + "display_id": null + } + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "> 2026-02-10 16:15:03,414 [info] Run execution finished: {\"name\":\"onnx-utils-optimize\",\"status\":\"completed\"}\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": 6 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "2.2.2. And now our model was optimized. Let us verify:" + ] + }, + { + "cell_type": "code", + "metadata": { + "pycharm": { + "name": "#%%\n" + }, + "ExecuteTime": { + "end_time": "2026-02-10T14:15:05.748413Z", + "start_time": "2026-02-10T14:15:05.745309Z" + } + }, + "source": [ + "optimized_model_file = os.path.join(ARTIFACT_PATH, \"onnx-utils-optimize\", \"0\", \"model\", \"optimized_onnx_mobilenetv2.onnx\")\n", + "assert os.path.isfile(optimized_model_file), f\"Optimized ONNX model not found at {optimized_model_file}\"\n", + "print(f\"Optimized ONNX model created at: {optimized_model_file}\")" + ], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Optimized ONNX model created at: /var/folders/rn/q8gs952n26982d36y50w_2rw0000gp/T/tmpvs5qvbxr/onnx-utils-optimize/0/model/optimized_onnx_mobilenetv2.onnx\n" + ] + } + ], + "execution_count": 7 + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "Lastly, run this code to clean up all generated files and directories:" + ] + }, + { + "cell_type": "code", + "metadata": { + 
"pycharm": { + "name": "#%%\n" + }, + "ExecuteTime": { + "end_time": "2026-02-10T14:00:28.409998Z", + "start_time": "2026-02-10T13:57:21.679146Z" + } + }, + "source": "import shutil\n\n# Clean up the temporary artifact directory:\nif os.path.exists(ARTIFACT_PATH):\n shutil.rmtree(ARTIFACT_PATH)", + "outputs": [], + "execution_count": null + } + ], + "metadata": { + "kernelspec": { + "display_name": "mlrun_functions", + "language": "python", + "name": "mlrun_functions" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.22" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/functions/development/onnx_utils/1.4.0/src/onnx_utils.py b/functions/development/onnx_utils/1.4.0/src/onnx_utils.py new file mode 100644 index 00000000..c26e011b --- /dev/null +++ b/functions/development/onnx_utils/1.4.0/src/onnx_utils.py @@ -0,0 +1,271 @@ +# Copyright 2019 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, Callable, Dict, List, Tuple + +import mlrun + + +class _ToONNXConversions: + """ + An ONNX conversion functions library class. 
+ """ + + @staticmethod + def tf_keras_to_onnx( + model_handler, + onnx_model_name: str = None, + optimize_model: bool = True, + input_signature: List[Tuple[Tuple[int], str]] = None, + ): + """ + Convert a TF.Keras model to an ONNX model and log it back to MLRun as a new model object. + + :param model_handler: An initialized TFKerasModelHandler with a loaded model to convert to ONNX. + :param onnx_model_name: The name to use to log the converted ONNX model. If not given, the given `model_name` + will be used with an additional suffix `_onnx`. Defaulted to None. + :param optimize_model: Whether or not to optimize the ONNX model using 'onnxoptimizer' before saving the model. + Defaulted to True. + :param input_signature: A list of the input layers shape and data type properties. Expected to receive a list + where each element is an input layer tuple. An input layer tuple is a tuple of: + [0] = Layer's shape, a tuple of integers. + [1] = Layer's data type, a mlrun.data_types.ValueType string. + If None, the input signature will be tried to be read from the model artifact. Defaulted + to None. + """ + # Import the framework and handler: + import tensorflow as tf + from mlrun.frameworks.tf_keras import TFKerasUtils + + # Check the given 'input_signature' parameter: + if input_signature is None: + # Read the inputs from the model: + try: + model_handler.read_inputs_from_model() + except Exception as error: + raise mlrun.errors.MLRunRuntimeError( + f"Please provide the 'input_signature' parameter. 
The function tried reading the input layers " + f"information automatically but failed with the following error: {error}" + ) + else: + # Parse the 'input_signature' parameter: + input_signature = [ + tf.TensorSpec( + shape=shape, + dtype=TFKerasUtils.convert_value_type_to_tf_dtype( + value_type=value_type + ), + ) + for (shape, value_type) in input_signature + ] + + # Convert to ONNX: + model_handler.to_onnx( + model_name=onnx_model_name, + input_signature=input_signature, + optimize=optimize_model, + ) + + @staticmethod + def pytorch_to_onnx( + model_handler, + onnx_model_name: str = None, + optimize_model: bool = True, + input_signature: List[Tuple[Tuple[int, ...], str]] = None, + input_layers_names: List[str] = None, + output_layers_names: List[str] = None, + dynamic_axes: Dict[str, Dict[int, str]] = None, + is_batched: bool = True, + ): + """ + Convert a PyTorch model to an ONNX model and log it back to MLRun as a new model object. + + :param model_handler: An initialized PyTorchModelHandler with a loaded model to convert to ONNX. + :param onnx_model_name: The name to use to log the converted ONNX model. If not given, the given + `model_name` will be used with an additional suffix `_onnx`. Defaulted to None. + :param optimize_model: Whether or not to optimize the ONNX model using 'onnxoptimizer' before saving the + model. Defaulted to True. + :param input_signature: A list of the input layers shape and data type properties. Expected to receive a + list where each element is an input layer tuple. An input layer tuple is a tuple of: + [0] = Layer's shape, a tuple of integers. + [1] = Layer's data type, a mlrun.data_types.ValueType string. + If None, the input signature will be tried to be read from the model artifact. + Defaulted to None. + :param input_layers_names: List of names to assign to the input nodes of the graph in order. All of the other + parameters (inner layers) can be set as well by passing additional names in the + list. 
The order is by the order of the parameters in the model. If None, the inputs + will be read from the handler's inputs. If its also None, it is defaulted to: + "input_0", "input_1", ... + :param output_layers_names: List of names to assign to the output nodes of the graph in order. If None, the + outputs will be read from the handler's outputs. If its also None, it is defaulted + to: "output_0" (for multiple outputs, this parameter must be provided). + :param dynamic_axes: If part of the input / output shape is dynamic, like (batch_size, 3, 32, 32) you can + specify it by giving a dynamic axis to the input / output layer by its name as + follows: { + "input layer name": {0: "batch_size"}, + "output layer name": {0: "batch_size"}, + } + If provided, the 'is_batched' flag will be ignored. Defaulted to None. + :param is_batched: Whether to include a batch size as the first axis in every input and output layer. + Defaulted to True. Will be ignored if 'dynamic_axes' is provided. + """ + # Import the framework and handler: + import torch + from mlrun.frameworks.pytorch import PyTorchUtils + + # Parse the 'input_signature' parameter: + if input_signature is not None: + input_signature = tuple( + [ + torch.zeros( + size=shape, + dtype=PyTorchUtils.convert_value_type_to_torch_dtype( + value_type=value_type + ), + ) + for (shape, value_type) in input_signature + ] + ) + + # Convert to ONNX: + model_handler.to_onnx( + model_name=onnx_model_name, + input_sample=input_signature, + optimize=optimize_model, + input_layers_names=input_layers_names, + output_layers_names=output_layers_names, + dynamic_axes=dynamic_axes, + is_batched=is_batched, + ) + + +# Map for getting the conversion function according to the provided framework: +_CONVERSION_MAP = { + "tensorflow.keras": _ToONNXConversions.tf_keras_to_onnx, + "torch": _ToONNXConversions.pytorch_to_onnx, +} # type: Dict[str, Callable] + + +def to_onnx( + context: mlrun.MLClientCtx, + model_path: str, + load_model_kwargs: dict = 
None, + onnx_model_name: str = None, + optimize_model: bool = True, + framework_kwargs: Dict[str, Any] = None, +): + """ + Convert the given model to an ONNX model. + + :param context: The MLRun function execution context + :param model_path: The model path store object. + :param load_model_kwargs: Keyword arguments to pass to the `AutoMLRun.load_model` method. + :param onnx_model_name: The name to use to log the converted ONNX model. If not given, the given `model_name` will + be used with an additional suffix `_onnx`. Defaulted to None. + :param optimize_model: Whether to optimize the ONNX model using 'onnxoptimizer' before saving the model. + Defaulted to True. + :param framework_kwargs: Additional arguments each framework may require to convert to ONNX. To get the doc string + of the desired framework onnx conversion function, pass "help". + """ + from mlrun.frameworks.auto_mlrun.auto_mlrun import AutoMLRun + + # Get a model handler of the required framework: + load_model_kwargs = load_model_kwargs or {} + model_handler = AutoMLRun.load_model( + model_path=model_path, context=context, **load_model_kwargs + ) + + # Get the model's framework: + framework = model_handler.FRAMEWORK_NAME + + # Use the conversion map to get the specific framework to onnx conversion: + if framework not in _CONVERSION_MAP: + raise mlrun.errors.MLRunInvalidArgumentError( + f"The following framework: '{framework}', has no ONNX conversion." 
+ ) + conversion_function = _CONVERSION_MAP[framework] + + # Check if needed to print the function's doc string ("help" is passed): + if framework_kwargs == "help": + print(conversion_function.__doc__) + return + + # Set the default empty framework kwargs if needed: + if framework_kwargs is None: + framework_kwargs = {} + + # Run the conversion: + try: + conversion_function( + model_handler=model_handler, + onnx_model_name=onnx_model_name, + optimize_model=optimize_model, + **framework_kwargs, + ) + except TypeError as exception: + raise mlrun.errors.MLRunInvalidArgumentError( + f"ERROR: A TypeError exception was raised during the conversion:\n{exception}. " + f"Please read the {framework} framework conversion function doc string by passing 'help' in the " + f"'framework_kwargs' dictionary parameter." + ) + + +def optimize( + context: mlrun.MLClientCtx, + model_path: str, + handler_init_kwargs: dict = None, + optimizations: List[str] = None, + fixed_point: bool = False, + optimized_model_name: str = None, +): + """ + Optimize the given ONNX model. + + :param context: The MLRun function execution context. + :param model_path: Path to the ONNX model object. + :param handler_init_kwargs: Keyword arguments to pass to the `ONNXModelHandler` init method preloading. + :param optimizations: List of possible optimizations. To see what optimizations are available, pass "help". + If None, all the optimizations will be used. Defaulted to None. + :param fixed_point: Optimize the weights using fixed point. Defaulted to False. + :param optimized_model_name: The name of the optimized model. If None, the original model will be overridden. + Defaulted to None. 
+ """ + # Import the model handler: + import onnxoptimizer + from mlrun.frameworks.onnx import ONNXModelHandler + + # Check if needed to print the available optimizations ("help" is passed): + if optimizations == "help": + available_passes = "\n* ".join(onnxoptimizer.get_available_passes()) + print(f"The available optimizations are:\n* {available_passes}") + return + + # Create the model handler: + handler_init_kwargs = handler_init_kwargs or {} + model_handler = ONNXModelHandler( + model_path=model_path, context=context, **handler_init_kwargs + ) + + # Load the ONNX model: + model_handler.load() + + # Optimize the model using the given configurations: + model_handler.optimize(optimizations=optimizations, fixed_point=fixed_point) + + # Rename if needed: + if optimized_model_name is not None: + model_handler.set_model_name(model_name=optimized_model_name) + + # Log the optimized model: + model_handler.log() diff --git a/functions/development/onnx_utils/1.4.0/src/requirements.txt b/functions/development/onnx_utils/1.4.0/src/requirements.txt new file mode 100644 index 00000000..912b3d7e --- /dev/null +++ b/functions/development/onnx_utils/1.4.0/src/requirements.txt @@ -0,0 +1,10 @@ +tqdm~=4.67.1 +tensorflow~=2.19.0 +tf_keras~=2.19.0 +torch~=2.8 +torchvision~=0.23.0 +onnx~=1.17.0 +onnxruntime~=1.19.2 +onnxoptimizer~=0.3.13 +onnxmltools~=1.13.0 +plotly~=5.23 \ No newline at end of file diff --git a/functions/development/onnx_utils/1.4.0/src/test_onnx_utils.py b/functions/development/onnx_utils/1.4.0/src/test_onnx_utils.py new file mode 100644 index 00000000..59c6c2b3 --- /dev/null +++ b/functions/development/onnx_utils/1.4.0/src/test_onnx_utils.py @@ -0,0 +1,432 @@ +# Copyright 2019 Iguazio +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import shutil +import tempfile + +import mlrun +import pytest + +PROJECT_NAME = "onnx-utils" + +# Choose our model's name: +MODEL_NAME = "model" + +# Choose our ONNX version model's name: +ONNX_MODEL_NAME = f"onnx_{MODEL_NAME}" + +# Choose our optimized ONNX version model's name: +OPTIMIZED_ONNX_MODEL_NAME = f"optimized_{ONNX_MODEL_NAME}" + +REQUIRED_ENV_VARS = [ + "MLRUN_DBPATH", + "MLRUN_ARTIFACT_PATH", + "V3IO_USERNAME", + "V3IO_ACCESS_KEY", +] + + +def _validate_environment_variables() -> bool: + """ + Checks that all required Environment variables are set. + """ + environment_keys = os.environ.keys() + return all(key in environment_keys for key in REQUIRED_ENV_VARS) + + +def _is_tf2onnx_available() -> bool: + """ + Check if tf2onnx is installed (required for TensorFlow/Keras ONNX conversion). + """ + try: + import tf2onnx + return True + except ImportError: + return False + + +@pytest.fixture(scope="session") +def onnx_project(): + """Create/get the MLRun project once per test session.""" + return mlrun.get_or_create_project(PROJECT_NAME, context="./") + + +@pytest.fixture(autouse=True) +def test_environment(onnx_project): + """Setup and cleanup test artifacts for each test.""" + artifact_path = tempfile.mkdtemp() + yield artifact_path + # Cleanup - only remove files/dirs from the directory containing this test file, + # never from an arbitrary CWD (which could be the project root). 
+ test_dir = os.path.dirname(os.path.abspath(__file__)) + for test_output in [ + "schedules", + "runs", + "artifacts", + "functions", + "model.pt", + "model.zip", + "model_modules_map.json", + "model_modules_map.json.json", + "onnx_model.onnx", + "optimized_onnx_model.onnx", + ]: + test_output_path = os.path.join(test_dir, test_output) + if os.path.exists(test_output_path): + if os.path.isdir(test_output_path): + shutil.rmtree(test_output_path) + else: + os.remove(test_output_path) + if os.path.exists(artifact_path): + shutil.rmtree(artifact_path) + + +def _log_tf_keras_model(context: mlrun.MLClientCtx, model_name: str): + """ + Create and log a tf.keras model - MobileNetV2. + + :param context: The context to log to. + :param model_name: The model name to use. + """ + # To use `tf_keras` instead of `tensorflow.keras` + os.environ["TF_USE_LEGACY_KERAS"] = "true" + from mlrun.frameworks.tf_keras import TFKerasModelHandler + from tensorflow import keras + + # Download the MobileNetV2 model: + model = keras.applications.mobilenet_v2.MobileNetV2() + + # Initialize a model handler for logging the model: + model_handler = TFKerasModelHandler( + model_name=model_name, model=model, context=context + ) + + # Log the model: + model_handler.log() + + +def _log_pytorch_model(context: mlrun.MLClientCtx, model_name: str): + """ + Create and log a pytorch model - MobileNetV2. + + :param context: The context to log to. + :param model_name: The model name to use. 
+ """ + import torchvision + from mlrun.frameworks.pytorch import PyTorchModelHandler + + # Download the MobileNetV2 model: + model = torchvision.models.mobilenet_v2() + + # Initialize a model handler for logging the model: + model_handler = PyTorchModelHandler( + model_name=model_name, + model=model, + model_class="mobilenet_v2", + modules_map={"torchvision.models": "mobilenet_v2"}, + context=context, + ) + + # Log the model: + model_handler.log() + + +@pytest.mark.skipif( + condition=not _validate_environment_variables(), + reason="Project's environment variables are not set", +) +def test_to_onnx_help(test_environment): + """ + Test the 'to_onnx' handler, passing "help" in the 'framework_kwargs'. + """ + artifact_path = test_environment + + # Create the function: + log_model_function = mlrun.code_to_function( + filename="test_onnx_utils.py", + name="log_model", + project=PROJECT_NAME, + kind="job", + image="mlrun/ml-models", + ) + + # Run the function to log the model: + log_model_function.run( + handler="_log_pytorch_model", + output_path=artifact_path, + params={"model_name": MODEL_NAME}, + local=True, + ) + + # Get artifact paths - construct from artifact_path and run structure + run_artifact_dir = os.path.join(artifact_path, "log-model--log-pytorch-model", "0") + model_path = os.path.join(run_artifact_dir, "model") + modules_map_path = os.path.join(run_artifact_dir, "model_modules_map.json.json") + + # Import the ONNX Utils function: + onnx_function = mlrun.import_function("function.yaml", project=PROJECT_NAME) + + # Run the function, passing "help" in 'framework_kwargs' and see that no exception was raised: + is_test_passed = True + try: + onnx_function.run( + handler="to_onnx", + output_path=artifact_path, + params={ + # Take the logged model from the previous function. 
+ "model_path": model_path, + "load_model_kwargs": { + "model_name": MODEL_NAME, + "model_class": "mobilenet_v2", + "modules_map": modules_map_path, + }, + "framework_kwargs": "help", + }, + local=True, + ) + except TypeError as exception: + print( + f"The test failed, the help was not handled properly and raised the following error: {exception}" + ) + is_test_passed = False + + assert is_test_passed + + +@pytest.mark.skipif( + condition=not _validate_environment_variables(), + reason="Project's environment variables are not set", +) +@pytest.mark.skipif( + condition=not _is_tf2onnx_available(), + reason="tf2onnx is not installed", +) +def test_tf_keras_to_onnx(test_environment): + """ + Test the 'to_onnx' handler, giving it a tf.keras model. + """ + artifact_path = test_environment + + # Create the function: + log_model_function = mlrun.code_to_function( + filename="test_onnx_utils.py", + name="log_model", + project=PROJECT_NAME, + kind="job", + image="mlrun/ml-models", + ) + + # Run the function to log the model: + log_model_run = log_model_function.run( + handler="_log_tf_keras_model", + output_path=artifact_path, + params={"model_name": MODEL_NAME}, + local=True, + ) + + # Import the ONNX Utils function: + onnx_function = mlrun.import_function("function.yaml", project=PROJECT_NAME) + + # Run the function to convert our model to ONNX: + onnx_function_run = onnx_function.run( + handler="to_onnx", + output_path=artifact_path, + params={ + # Take the logged model from the previous function. 
+ "model_path": log_model_run.status.artifacts[0]["spec"]["target_path"], + "load_model_kwargs": {"model_name": MODEL_NAME}, + "onnx_model_name": ONNX_MODEL_NAME, + }, + local=True, + ) + + # Print the outputs list: + print(f"Produced outputs: {onnx_function_run.outputs}") + + # Verify the '.onnx' model was created: + assert "model" in onnx_function_run.outputs + + +@pytest.mark.skipif( + condition=not _validate_environment_variables(), + reason="Project's environment variables are not set", +) +def test_pytorch_to_onnx(test_environment): + """ + Test the 'to_onnx' handler, giving it a pytorch model. + """ + artifact_path = test_environment + + # Create the function: + log_model_function = mlrun.code_to_function( + filename="test_onnx_utils.py", + name="log_model", + project=PROJECT_NAME, + kind="job", + image="mlrun/ml-models", + ) + + # Run the function to log the model: + log_model_run = log_model_function.run( + handler="_log_pytorch_model", + output_path=artifact_path, + params={"model_name": MODEL_NAME}, + local=True, + ) + + # Import the ONNX Utils function: + onnx_function = mlrun.import_function("function.yaml", project=PROJECT_NAME) + + # Get artifact paths - construct from artifact_path and run structure + run_artifact_dir = os.path.join(artifact_path, "log-model--log-pytorch-model", "0") + model_path = os.path.join(run_artifact_dir, "model") + modules_map_path = os.path.join(run_artifact_dir, "model_modules_map.json.json") + + # Run the function to convert our model to ONNX: + onnx_function_run = onnx_function.run( + handler="to_onnx", + output_path=artifact_path, + params={ + # Take the logged model from the previous function. 
+ "model_path": model_path, + "load_model_kwargs": { + "model_name": MODEL_NAME, + "model_class": "mobilenet_v2", + "modules_map": modules_map_path, + }, + "onnx_model_name": ONNX_MODEL_NAME, + "framework_kwargs": {"input_signature": [((32, 3, 224, 224), "float32")]}, + }, + local=True, + ) + + # Print the outputs list: + print(f"Produced outputs: {onnx_function_run.outputs}") + + # Verify the '.onnx' model was created: + assert "model" in onnx_function_run.outputs + + +@pytest.mark.skipif( + condition=not _validate_environment_variables(), + reason="Project's environment variables are not set", +) +def test_optimize_help(test_environment): + """ + Test the 'optimize' handler, passing "help" in the 'optimizations'. + """ + artifact_path = test_environment + + # Import the ONNX Utils function: + onnx_function = mlrun.import_function("function.yaml", project=PROJECT_NAME) + + # Run the function, passing "help" in 'optimizations' and see that no exception was raised: + is_test_passed = True + try: + onnx_function.run( + handler="optimize", + output_path=artifact_path, + params={ + "model_path": "", + "optimizations": "help", + }, + local=True, + ) + except TypeError as exception: + print( + f"The test failed, the help was not handled properly and raised the following error: {exception}" + ) + is_test_passed = False + + assert is_test_passed + + +@pytest.mark.skipif( + condition=not _validate_environment_variables(), + reason="Project's environment variables are not set", +) +def test_optimize(test_environment): + """ + Test the 'optimize' handler, giving it a pytorch model converted to ONNX. 
+ """ + artifact_path = test_environment + + # Create the function: + log_model_function = mlrun.code_to_function( + filename="test_onnx_utils.py", + name="log_model", + project=PROJECT_NAME, + kind="job", + image="mlrun/ml-models", + ) + + # Run the function to log the model: + log_model_function.run( + handler="_log_pytorch_model", + output_path=artifact_path, + params={"model_name": MODEL_NAME}, + local=True, + ) + + # Get artifact paths - construct from artifact_path and run structure + run_artifact_dir = os.path.join(artifact_path, "log-model--log-pytorch-model", "0") + model_path = os.path.join(run_artifact_dir, "model") + modules_map_path = os.path.join(run_artifact_dir, "model_modules_map.json.json") + + # Import the ONNX Utils function: + onnx_function = mlrun.import_function("function.yaml", project=PROJECT_NAME) + + # Run the function to convert our model to ONNX: + onnx_function.run( + handler="to_onnx", + output_path=artifact_path, + params={ + # Take the logged model from the previous function. + "model_path": model_path, + "load_model_kwargs": { + "model_name": MODEL_NAME, + "model_class": "mobilenet_v2", + "modules_map": modules_map_path, + }, + "onnx_model_name": ONNX_MODEL_NAME, + "framework_kwargs": {"input_signature": [((32, 3, 224, 224), "float32")]}, + }, + local=True, + ) + + # Get the ONNX model path from the to_onnx run output + onnx_run_artifact_dir = os.path.join( + artifact_path, "onnx-utils-to-onnx", "0" + ) + onnx_model_path = os.path.join(onnx_run_artifact_dir, "model") + + # Run the function to optimize our model: + optimize_function_run = onnx_function.run( + handler="optimize", + output_path=artifact_path, + params={ + # Take the logged model from the previous function. 
+ "model_path": onnx_model_path, + "handler_init_kwargs": {"model_name": ONNX_MODEL_NAME}, + "optimized_model_name": OPTIMIZED_ONNX_MODEL_NAME, + }, + local=True, + ) + + # Print the outputs list: + print(f"Produced outputs: {optimize_function_run.outputs}") + + # Verify the '.onnx' model was created: + assert "model" in optimize_function_run.outputs diff --git a/functions/development/onnx_utils/1.4.0/static/documentation.html b/functions/development/onnx_utils/1.4.0/static/documentation.html new file mode 100644 index 00000000..3faa1b78 --- /dev/null +++ b/functions/development/onnx_utils/1.4.0/static/documentation.html @@ -0,0 +1,277 @@ + + + + + + + +onnx_utils package + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+
+
+
+ +
+ +
+
+
+ + + + +
+
+
+
+
+
+
+
+
+
+
+
+
+ + + + +
+
+
+
+
+

onnx_utils package

+ +
+ +
+
+ +
+
+

onnx_utils package#

+
+

Submodules#

+
+
+

onnx_utils.onnx_utils module#

+
+
+onnx_utils.onnx_utils.optimize(context: MLClientCtx, model_path: str, handler_init_kwargs: dict | None = None, optimizations: List[str] | None = None, fixed_point: bool = False, optimized_model_name: str | None = None)[source]#
+

Optimize the given ONNX model.

+
+
Parameters:
+
    +
  • context – The MLRun function execution context.

  • +
  • model_path – Path to the ONNX model object.

  • +
  • handler_init_kwargs – Keyword arguments to pass to the ONNXModelHandler init method preloading.

  • +
  • optimizations – List of possible optimizations. To see what optimizations are available, pass “help”. +If None, all the optimizations will be used. Defaulted to None.

  • +
  • fixed_point – Optimize the weights using fixed point. Defaulted to False.

  • +
  • optimized_model_name – The name of the optimized model. If None, the original model will be overridden. +Defaulted to None.

  • +
+
+
+
+
+
+onnx_utils.onnx_utils.to_onnx(context: MLClientCtx, model_path: str, load_model_kwargs: dict | None = None, onnx_model_name: str | None = None, optimize_model: bool = True, framework_kwargs: Dict[str, Any] | None = None)[source]#
+

Convert the given model to an ONNX model.

+
+
Parameters:
+
    +
  • context – The MLRun function execution context

  • +
  • model_path – The model path store object.

  • +
  • load_model_kwargs – Keyword arguments to pass to the AutoMLRun.load_model method.

  • +
  • onnx_model_name – The name to use to log the converted ONNX model. If not given, the given model_name will +be used with an additional suffix _onnx. Defaulted to None.

  • +
  • optimize_model – Whether to optimize the ONNX model using ‘onnxoptimizer’ before saving the model. +Defaulted to True.

  • +
  • framework_kwargs – Additional arguments each framework may require to convert to ONNX. To get the doc string +of the desired framework onnx conversion function, pass “help”.

  • +
+
+
+
+
+
+

Module contents#

+
+
+
+
+
+
+
+
+ +
+
+ +
+
+
+
+ + + +
+
+ + \ No newline at end of file diff --git a/functions/development/onnx_utils/1.4.0/static/example.html b/functions/development/onnx_utils/1.4.0/static/example.html new file mode 100644 index 00000000..add12b6c --- /dev/null +++ b/functions/development/onnx_utils/1.4.0/static/example.html @@ -0,0 +1,1160 @@ + + + + + + + +ONNX Utils + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+
+
+
+ +
+ +
+
+
+ + + + +
+
+
+
+
+
+
+
+
+
+
+
+
+ + + + +
+
+
+
+ + +
+
+

ONNX Utils#

+

A collection of ONNX utils in one MLRun function. The function includes the following handlers:

+
    +
  1. to_onnx - Convert your model into ONNX format.

  2. +
  3. optimize - Perform ONNX optimizations using onnxoptimizer on a given ONNX model.

  4. +
+

+
+

1. to_onnx#

+
+

1.1. Docs#

+

Convert the given model to an ONNX model.

+
+

Parameters:#

+
    +
  • context: mlrun.MLClientCtx - The MLRun function execution context

  • +
  • model_path: str - The model path store object.

  • +
  • onnx_model_name: str = None - The name to use to log the converted ONNX model. If not given, the given model_name will be used with an additional suffix _onnx. Defaulted to None.

  • +
  • optimize_model: bool = True - Whether to optimize the ONNX model using ‘onnxoptimizer’ before saving the model. Defaulted to True.

  • +
  • load_model_kwargs: dict = None - Keyword arguments to pass to the AutoMLRun.load_model method. Defaulted to None.

  • +
  • framework_kwargs: Dict[str, Any] = None - Additional arguments each framework may require in order to convert to ONNX. To get the doc string of the desired framework onnx conversion function, pass “help”.

  • +
+
+
+

Supported keyword arguments (framework_kwargs) per framework:#

+

tensorflow.keras:

+
    +
  • input_signature: List[Tuple[Tuple[int], str]] = None - A list of the input layers shape and data type properties. Expected to receive a list where each element is an input layer tuple. An input layer tuple is a tuple of:

    +
      +
    • [0] = Layer’s shape, a tuple of integers.

    • +
    • [1] = Layer’s data type, a mlrun.data_types.ValueType string.

    • +
    +

    If None, the input signature will be read automatically before converting to ONNX, or taken from the model artifact if available. Defaulted to None.

    +
  • +
+

torch:

+
    +
  • input_signature: List[Tuple[Tuple[int], str]] = None - A list of the input layers shape and data type properties. Expected to receive a list where each element is an input layer tuple. An input layer tuple is a tuple of:

    +
      +
    • [0] = Layer’s shape, a tuple of integers.

    • +
    • [1] = Layer’s data type, a mlrun.data_types.ValueType string.

    • +
    +

    If None, the input signature will be read from the model artifact if available. Defaulted to None.

    +
  • +
  • input_layers_names: List[str] = None - List of names to assign to the input nodes of the graph in order. All of the other parameters (inner layers) can be set as well by passing additional names in the list. The order is by the order of the parameters in the model. If None, the inputs will be read from the handler’s inputs. If it’s also None, it is defaulted to: “input_0”, “input_1”, …

  • +
  • output_layers_names: List[str] = None - List of names to assign to the output nodes of the graph in order. If None, the outputs will be read from the handler’s outputs. If it’s also None, it is defaulted to: “output_0” (for multiple outputs, this parameter must be provided).

  • +
  • dynamic_axes: Dict[str, Dict[int, str]] = None - If part of the input / output shape is dynamic, like (batch_size, 3, 32, 32) you can specify it by giving a dynamic axis to the input / output layer by its name as follows:

  • +
+
{
+    "input layer name": {0: "batch_size"},
+    "output layer name": {0: "batch_size"},
+}
+
+
+

If provided, the ‘is_batched’ flag will be ignored. Defaulted to None.

+
    +
  • is_batched: bool = True - Whether to include a batch size as the first axis in every input and output layer. Defaulted to True. Will be ignored if ‘dynamic_axes’ is provided.

  • +
+
+
+
+

1.2. Demo#

+

We will use the PyTorch framework, a MobileNetV2 as our model and we will convert it to ONNX using the to_onnx handler.

+

1.2.1. First we will set the artifact path for our model to be saved in and choose the models names:

+
+
+
import os
+import tempfile
+# Use a temporary directory for model artifacts (safe cleanup):
+ARTIFACT_PATH = tempfile.mkdtemp()
+os.environ["MLRUN_ARTIFACT_PATH"] = ARTIFACT_PATH
+
+# Project name:
+PROJECT_NAME = "onnx-utils"
+
+# Choose our model's name:
+MODEL_NAME = "mobilenetv2"
+
+# Choose our ONNX version model's name:
+ONNX_MODEL_NAME = "onnx_mobilenetv2"
+
+# Choose our optimized ONNX version model's name:
+OPTIMIZED_ONNX_MODEL_NAME = "optimized_onnx_mobilenetv2"
+
+
+
+
+

1.2.2. Download the model from torchvision.models and log it with MLRun’s PyTorchModelHandler:

+
+
+
# mlrun: start-code
+
+
+
+
+
+
+
import torchvision
+
+import mlrun
+from mlrun.frameworks.pytorch import PyTorchModelHandler
+
+
+def get_model(context: mlrun.MLClientCtx, model_name: str):
+    # Download the MobileNetV2 model:
+    model = torchvision.models.mobilenet_v2()
+
+    # Initialize a model handler for logging the model:
+    model_handler = PyTorchModelHandler(
+        model_name=model_name,
+        model=model,
+        model_class="mobilenet_v2",
+        modules_map={"torchvision.models": "mobilenet_v2"},
+        context=context,
+    )
+
+    # Log the model:
+    model_handler.log()
+
+
+
+
+
+
+
# mlrun: end-code
+
+
+
+
+
+
+
import mlrun
+
+# Create or get the MLRun project:
+project = mlrun.get_or_create_project(PROJECT_NAME, context="./")
+
+# Create the function parsing this notebook's code using 'code_to_function':
+get_model_function = mlrun.code_to_function(
+    name="get_mobilenetv2",
+    project=PROJECT_NAME,
+    kind="job",
+    image="mlrun/ml-models"
+)
+
+# Run the function to log the model:
+get_model_run = get_model_function.run(
+    handler="get_model",
+    output_path=ARTIFACT_PATH,
+    params={
+        "model_name": MODEL_NAME
+    },
+    local=True
+)
+
+
+
+
+
> 2026-02-10 16:14:24,932 [info] Created and saved project: {"context":"./","from_template":null,"name":"onnx-utils","overwrite":false,"save":true}
+> 2026-02-10 16:14:24,933 [info] Project created successfully: {"project_name":"onnx-utils","stored_in_db":true}
+> 2026-02-10 16:14:31,659 [info] Storing function: {"db":null,"name":"get-mobilenetv2-get-model","uid":"7b9d1b54375b44e191d73685a382c910"}
+
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
projectuiditerstartendstatekindnamelabelsinputsparametersresultsartifact_uris
onnx-utils0Feb 10 14:14:32NaTcompletedrunget-mobilenetv2-get-model
v3io_user=omerm
kind=local
owner=omerm
host=M-KCX16N69X3
model_name=mobilenetv2
mobilenetv2_modules_map.json=store://artifacts/onnx-utils/#0@7b9d1b54375b44e191d73685a382c910
model=store://models/onnx-utils/mobilenetv2#0@7b9d1b54375b44e191d73685a382c910^e0393bc5b070fd55cc57cecb94160ce412498e0f
+
+ +
+

+
+
+
> to track results use the .show() or .logs() methods or click here to open in UI
> 2026-02-10 16:14:34,427 [info] Run execution finished: {"name":"get-mobilenetv2-get-model","status":"completed"}
+
+
+
+
+

1.2.4. Import the onnx_utils MLRun function and run it:

+
+
+
# Import the ONNX function from the marketplace:
+onnx_utils_function = mlrun.import_function("hub://onnx_utils", project=PROJECT_NAME)
+
+# Construct the model path from the run directory structure:
+model_path = os.path.join(ARTIFACT_PATH, "get-mobilenetv2-get-model", "0", "model")
+modules_map_path = os.path.join(ARTIFACT_PATH, "get-mobilenetv2-get-model", "0", "mobilenetv2_modules_map.json.json")
+
+# Run the function to convert our model to ONNX:
+to_onnx_run = onnx_utils_function.run(
+    handler="to_onnx",
+    output_path=ARTIFACT_PATH,
+    params={
+        "model_name": MODEL_NAME,
+        "model_path": model_path,
+        "load_model_kwargs": {
+            "model_name": MODEL_NAME,
+            "model_class": "mobilenet_v2",
+            "modules_map": modules_map_path,
+        },
+        "onnx_model_name": ONNX_MODEL_NAME,
+        "optimize_model": False,  # <- For optimizing it later in the demo, we mark the flag as False
+        "framework_kwargs": {"input_signature": [((32, 3, 224, 224), "float32")]},
+    },
+    local=True
+)
+
+
+
+
+
> 2026-02-10 16:14:48,519 [info] Storing function: {"db":null,"name":"onnx-utils-to-onnx","uid":"95deb2c7dbf0460291efb25c48eeebd7"}
+
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
projectuiditerstartendstatekindnamelabelsinputsparametersresultsartifact_uris
onnx-utils0Feb 10 14:14:49NaTcompletedrunonnx-utils-to-onnx
v3io_user=omerm
kind=local
owner=omerm
host=M-KCX16N69X3
model_name=mobilenetv2
model_path=/var/folders/rn/q8gs952n26982d36y50w_2rw0000gp/T/tmpvs5qvbxr/get-mobilenetv2-get-model/0/model
load_model_kwargs={'model_name': 'mobilenetv2', 'model_class': 'mobilenet_v2', 'modules_map': '/var/folders/rn/q8gs952n26982d36y50w_2rw0000gp/T/tmpvs5qvbxr/get-mobilenetv2-get-model/0/mobilenetv2_modules_map.json.json'}
onnx_model_name=onnx_mobilenetv2
optimize_model=False
framework_kwargs={'input_signature': [((32, 3, 224, 224), 'float32')]}
model=store://models/onnx-utils/onnx_mobilenetv2#0@95deb2c7dbf0460291efb25c48eeebd7^03e4286da44d015cf5465d43e809a504d15f7f63
+
+ +
+

+
+
+
> to track results use the .show() or .logs() methods or click here to open in UI
> 2026-02-10 16:14:53,862 [info] Run execution finished: {"name":"onnx-utils-to-onnx","status":"completed"}
+
+
+
+
+

1.2.5. Now we verify the ONNX model was created:

+
+
+
import os
+
+onnx_model_file = os.path.join(ARTIFACT_PATH, "onnx-utils-to-onnx", "0", "model", "onnx_mobilenetv2.onnx")
+assert os.path.isfile(onnx_model_file), f"ONNX model not found at {onnx_model_file}"
+print(f"ONNX model created at: {onnx_model_file}")
+
+
+
+
+
ONNX model created at: /var/folders/rn/q8gs952n26982d36y50w_2rw0000gp/T/tmpvs5qvbxr/onnx-utils-to-onnx/0/model/onnx_mobilenetv2.onnx
+
+
+
+
+

+
+
+
+

2. optimize#

+
+

2.1. Docs#

+

Optimize the given ONNX model.

+
+

Parameters:#

+
    +
  • context: mlrun.MLClientCtx - The MLRun function execution context

  • +
  • model_path: str - The model path store object.

  • +
  • optimizations: List[str] = None - List of possible optimizations. To see what optimizations are available, pass “help”. If None, all of the optimizations will be used. Defaulted to None.

  • +
  • fixed_point: bool = False - Optimize the weights using fixed point. Defaulted to False.

  • +
  • optimized_model_name: str = None - The name of the optimized model. If None, the original model will be overridden. Defaulted to None.

  • +
+
+
+
+

2.2. Demo#

+

We will use our converted model from the last example and optimize it.

+

2.2.1. We will call now the optimize handler:

+
+
+
# Construct the ONNX model path from the run directory structure:
+onnx_model_path = os.path.join(ARTIFACT_PATH, "onnx-utils-to-onnx", "0", "model")
+
+onnx_utils_function.run(
+    handler="optimize",
+    output_path=ARTIFACT_PATH,
+    params={
+        "model_path": onnx_model_path,
+        "handler_init_kwargs": {"model_name": ONNX_MODEL_NAME},
+        "optimized_model_name": OPTIMIZED_ONNX_MODEL_NAME,
+    },
+    local=True
+)
+
+
+
+
+
> 2026-02-10 16:15:00,639 [info] Storing function: {"db":null,"name":"onnx-utils-optimize","uid":"0c30d7af94814dcabde8152a1951fb5d"}
+
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
projectuiditerstartendstatekindnamelabelsinputsparametersresultsartifact_uris
onnx-utils0Feb 10 14:15:01NaTcompletedrunonnx-utils-optimize
v3io_user=omerm
kind=local
owner=omerm
host=M-KCX16N69X3
model_path=/var/folders/rn/q8gs952n26982d36y50w_2rw0000gp/T/tmpvs5qvbxr/onnx-utils-to-onnx/0/model
handler_init_kwargs={'model_name': 'onnx_mobilenetv2'}
optimized_model_name=optimized_onnx_mobilenetv2
model=store://models/onnx-utils/optimized_onnx_mobilenetv2#0@0c30d7af94814dcabde8152a1951fb5d^599547984e83a664dc1c2708607d06731edb5ac2
+
+ +
+

+
+
+
> to track results use the .show() or .logs() methods or click here to open in UI
> 2026-02-10 16:15:03,414 [info] Run execution finished: {"name":"onnx-utils-optimize","status":"completed"}
+
+
+
<mlrun.model.RunObject at 0x106148190>
+
+
+
+
+

2.2.2. And now our model was optimized. Let us verify:

+
+
+
optimized_model_file = os.path.join(ARTIFACT_PATH, "onnx-utils-optimize", "0", "model", "optimized_onnx_mobilenetv2.onnx")
+assert os.path.isfile(optimized_model_file), f"Optimized ONNX model not found at {optimized_model_file}"
+print(f"Optimized ONNX model created at: {optimized_model_file}")
+
+
+
+
+
Optimized ONNX model created at: /var/folders/rn/q8gs952n26982d36y50w_2rw0000gp/T/tmpvs5qvbxr/onnx-utils-optimize/0/model/optimized_onnx_mobilenetv2.onnx
+
+
+
+
+

Lastly, run this code to clean up all generated files and directories:

+
+
+
import shutil
+
+# Clean up the temporary artifact directory:
+if os.path.exists(ARTIFACT_PATH):
+    shutil.rmtree(ARTIFACT_PATH)
+
+
+
+
+
+
+
+
+
+
+
+
+
+ +
+
+ +
+
+
+
+ + + +
+
+ + \ No newline at end of file diff --git a/functions/development/onnx_utils/1.4.0/static/function.html b/functions/development/onnx_utils/1.4.0/static/function.html new file mode 100644 index 00000000..d1b5444b --- /dev/null +++ b/functions/development/onnx_utils/1.4.0/static/function.html @@ -0,0 +1,224 @@ + + + + + + + + + + + Source + + + + +
+        
+metadata:
+  name: onnx-utils
+  tag: ''
+  categories:
+  - utilities
+  - deep-learning
+kind: job
+spec:
+  entry_points:
+    tf_keras_to_onnx:
+      name: tf_keras_to_onnx
+      parameters:
+      - name: model_handler
+        doc: An initialized TFKerasModelHandler with a loaded model to convert to
+          ONNX.
+      - name: onnx_model_name
+        type: str
+        doc: The name to use to log the converted ONNX model. If not given, the given
+          `model_name` will be used with an additional suffix `_onnx`. Defaulted to
+          None.
+        default: null
+      - name: optimize_model
+        type: bool
+        doc: Whether or not to optimize the ONNX model using 'onnxoptimizer' before
+          saving the model. Defaulted to True.
+        default: true
+      - name: input_signature
+        type: List[Tuple[Tuple[int], str]]
+        doc: 'A list of the input layers shape and data type properties. Expected
+          to receive a list where each element is an input layer tuple. An input layer
+          tuple is a tuple of: [0] = Layer''s shape, a tuple of integers. [1] = Layer''s
+          data type, a mlrun.data_types.ValueType string. If None, the input signature
+          will be tried to be read from the model artifact. Defaulted to None.'
+        default: null
+      doc: Convert a TF.Keras model to an ONNX model and log it back to MLRun as a
+        new model object.
+      lineno: 26
+      has_varargs: false
+      has_kwargs: false
+    pytorch_to_onnx:
+      name: pytorch_to_onnx
+      parameters:
+      - name: model_handler
+        doc: An initialized PyTorchModelHandler with a loaded model to convert to
+          ONNX.
+      - name: onnx_model_name
+        type: str
+        doc: The name to use to log the converted ONNX model. If not given, the given
+          `model_name` will be used with an additional suffix `_onnx`. Defaulted to
+          None.
+        default: null
+      - name: optimize_model
+        type: bool
+        doc: Whether or not to optimize the ONNX model using 'onnxoptimizer' before
+          saving the model. Defaulted to True.
+        default: true
+      - name: input_signature
+        type: List[Tuple[Tuple[int, ], str]]
+        doc: 'A list of the input layers shape and data type properties. Expected
+          to receive a list where each element is an input layer tuple. An input layer
+          tuple is a tuple of: [0] = Layer''s shape, a tuple of integers. [1] = Layer''s
+          data type, a mlrun.data_types.ValueType string. If None, the input signature
+          will be tried to be read from the model artifact. Defaulted to None.'
+        default: null
+      - name: input_layers_names
+        type: List[str]
+        doc: 'List of names to assign to the input nodes of the graph in order. All
+          of the other parameters (inner layers) can be set as well by passing additional
+          names in the list. The order is by the order of the parameters in the model.
+          If None, the inputs will be read from the handler''s inputs. If its also
+          None, it is defaulted to: "input_0", "input_1", ...'
+        default: null
+      - name: output_layers_names
+        type: List[str]
+        doc: 'List of names to assign to the output nodes of the graph in order. If
+          None, the outputs will be read from the handler''s outputs. If its also
+          None, it is defaulted to: "output_0" (for multiple outputs, this parameter
+          must be provided).'
+        default: null
+      - name: dynamic_axes
+        type: Dict[str, Dict[int, str]]
+        doc: 'If part of the input / output shape is dynamic, like (batch_size, 3,
+          32, 32) you can specify it by giving a dynamic axis to the input / output
+          layer by its name as follows: { "input layer name": {0: "batch_size"}, "output
+          layer name": {0: "batch_size"}, } If provided, the ''is_batched'' flag will
+          be ignored. Defaulted to None.'
+        default: null
+      - name: is_batched
+        type: bool
+        doc: Whether to include a batch size as the first axis in every input and
+          output layer. Defaulted to True. Will be ignored if 'dynamic_axes' is provided.
+        default: true
+      doc: Convert a PyTorch model to an ONNX model and log it back to MLRun as a
+        new model object.
+      lineno: 81
+      has_varargs: false
+      has_kwargs: false
+    to_onnx:
+      name: to_onnx
+      parameters:
+      - name: context
+        type: MLClientCtx
+        doc: The MLRun function execution context
+      - name: model_path
+        type: str
+        doc: The model path store object.
+      - name: load_model_kwargs
+        type: dict
+        doc: Keyword arguments to pass to the `AutoMLRun.load_model` method.
+        default: null
+      - name: onnx_model_name
+        type: str
+        doc: The name to use to log the converted ONNX model. If not given, the given
+          `model_name` will be used with an additional suffix `_onnx`. Defaulted to
+          None.
+        default: null
+      - name: optimize_model
+        type: bool
+        doc: Whether to optimize the ONNX model using 'onnxoptimizer' before saving
+          the model. Defaulted to True.
+        default: true
+      - name: framework_kwargs
+        type: Dict[str, Any]
+        doc: Additional arguments each framework may require to convert to ONNX. To
+          get the doc string of the desired framework onnx conversion function, pass
+          "help".
+        default: null
+      doc: Convert the given model to an ONNX model.
+      lineno: 160
+      has_varargs: false
+      has_kwargs: false
+    optimize:
+      name: optimize
+      parameters:
+      - name: context
+        type: MLClientCtx
+        doc: The MLRun function execution context.
+      - name: model_path
+        type: str
+        doc: Path to the ONNX model object.
+      - name: handler_init_kwargs
+        type: dict
+        doc: Keyword arguments to pass to the `ONNXModelHandler` init method preloading.
+        default: null
+      - name: optimizations
+        type: List[str]
+        doc: List of possible optimizations. To see what optimizations are available,
+          pass "help". If None, all the optimizations will be used. Defaulted to None.
+        default: null
+      - name: fixed_point
+        type: bool
+        doc: Optimize the weights using fixed point. Defaulted to False.
+        default: false
+      - name: optimized_model_name
+        type: str
+        doc: The name of the optimized model. If None, the original model will be
+          overridden. Defaulted to None.
+        default: null
+      doc: Optimize the given ONNX model.
+      lineno: 224
+      has_varargs: false
+      has_kwargs: false
+  image: ''
+  default_handler: to_onnx
+  allow_empty_resources: true
+  command: ''
+  disable_auto_mount: false
+  description: ONNX integration in MLRun, utility functions for the ONNX framework,
+    optimizing and converting models from different frameworks to ONNX using MLRun.
+  build:
+    functionSourceCode: # Copyright 2019 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Callable, Dict, List, Tuple

import mlrun


class _ToONNXConversions:
    """
    An ONNX conversion functions library class.
    """

    @staticmethod
    def tf_keras_to_onnx(
        model_handler,
        onnx_model_name: str = None,
        optimize_model: bool = True,
        input_signature: List[Tuple[Tuple[int], str]] = None,
    ):
        """
        Convert a TF.Keras model to an ONNX model and log it back to MLRun as a new model object.

        :param model_handler:   An initialized TFKerasModelHandler with a loaded model to convert to ONNX.
        :param onnx_model_name: The name to use to log the converted ONNX model. If not given, the given `model_name`
                                will be used with an additional suffix `_onnx`. Defaulted to None.
        :param optimize_model:  Whether or not to optimize the ONNX model using 'onnxoptimizer' before saving the model.
                                Defaulted to True.
        :param input_signature: A list of the input layers shape and data type properties. Expected to receive a list
                                where each element is an input layer tuple. An input layer tuple is a tuple of:
                                [0] = Layer's shape, a tuple of integers.
                                [1] = Layer's data type, a mlrun.data_types.ValueType string.
                                If None, the input signature will be tried to be read from the model artifact. Defaulted
                                to None.
        """
        # Import the framework and handler:
        import tensorflow as tf
        from mlrun.frameworks.tf_keras import TFKerasUtils

        # Check the given 'input_signature' parameter:
        if input_signature is None:
            # Read the inputs from the model:
            try:
                model_handler.read_inputs_from_model()
            except Exception as error:
                raise mlrun.errors.MLRunRuntimeError(
                    f"Please provide the 'input_signature' parameter. The function tried reading the input layers "
                    f"information automatically but failed with the following error: {error}"
                )
        else:
            # Parse the 'input_signature' parameter:
            input_signature = [
                tf.TensorSpec(
                    shape=shape,
                    dtype=TFKerasUtils.convert_value_type_to_tf_dtype(
                        value_type=value_type
                    ),
                )
                for (shape, value_type) in input_signature
            ]

        # Convert to ONNX:
        model_handler.to_onnx(
            model_name=onnx_model_name,
            input_signature=input_signature,
            optimize=optimize_model,
        )

    @staticmethod
    def pytorch_to_onnx(
        model_handler,
        onnx_model_name: str = None,
        optimize_model: bool = True,
        input_signature: List[Tuple[Tuple[int, ...], str]] = None,
        input_layers_names: List[str] = None,
        output_layers_names: List[str] = None,
        dynamic_axes: Dict[str, Dict[int, str]] = None,
        is_batched: bool = True,
    ):
        """
        Convert a PyTorch model into an ONNX model and log it back to MLRun as a new model object.

        :param model_handler:       An initialized PyTorchModelHandler holding the loaded model to convert.
        :param onnx_model_name:     Name under which to log the converted ONNX model. When None, the handler
                                    uses the original model name with an `_onnx` suffix. Defaulted to None.
        :param optimize_model:      Whether to run 'onnxoptimizer' on the converted model before saving it.
                                    Defaulted to True.
        :param input_signature:     Properties of the input layers: a list of tuples, one per input layer,
                                    where each tuple holds:
                                    [0] = the layer's shape (a tuple of integers), and
                                    [1] = the layer's data type (a mlrun.data_types.ValueType string).
                                    When None, the handler attempts to read the signature from the model
                                    artifact. Defaulted to None.
        :param input_layers_names:  Names to assign to the graph's input nodes, in parameter order. Extra
                                    names in the list are applied to inner-layer parameters as well. When
                                    None, the handler's inputs are used; if those are also None, names
                                    default to "input_0", "input_1", ...
        :param output_layers_names: Names to assign to the graph's output nodes, in order. When None, the
                                    handler's outputs are used; if those are also None, the name defaults to
                                    "output_0" (this parameter is required for multiple outputs).
        :param dynamic_axes:        Marks dynamic parts of input / output shapes, e.g. for
                                    (batch_size, 3, 32, 32) pass a mapping keyed by layer name:
                                    {
                                        "input layer name": {0: "batch_size"},
                                        "output layer name": {0: "batch_size"},
                                    }
                                    When provided, 'is_batched' is ignored. Defaulted to None.
        :param is_batched:          Whether a batch size is included as the first axis of every input and
                                    output layer. Defaulted to True. Ignored when 'dynamic_axes' is given.
        """
        # Import the framework and the MLRun utilities for it:
        import torch
        from mlrun.frameworks.pytorch import PyTorchUtils

        # Materialize the declared signature into zero-filled sample tensors
        # (one per input layer) for the handler to trace during export:
        if input_signature is not None:
            input_signature = tuple(
                torch.zeros(
                    size=layer_shape,
                    dtype=PyTorchUtils.convert_value_type_to_torch_dtype(
                        value_type=layer_value_type
                    ),
                )
                for layer_shape, layer_value_type in input_signature
            )

        # Delegate the actual conversion and logging to the handler:
        model_handler.to_onnx(
            model_name=onnx_model_name,
            input_sample=input_signature,
            optimize=optimize_model,
            input_layers_names=input_layers_names,
            output_layers_names=output_layers_names,
            dynamic_axes=dynamic_axes,
            is_batched=is_batched,
        )


# Map for getting the conversion function according to the provided framework.
# Keys match the `FRAMEWORK_NAME` attribute reported by the MLRun model handlers
# (see `to_onnx` below, which reads `model_handler.FRAMEWORK_NAME` to look up
# the conversion here):
_CONVERSION_MAP: Dict[str, Callable] = {
    "tensorflow.keras": _ToONNXConversions.tf_keras_to_onnx,
    "torch": _ToONNXConversions.pytorch_to_onnx,
}


def to_onnx(
    context: mlrun.MLClientCtx,
    model_path: str,
    load_model_kwargs: dict = None,
    onnx_model_name: str = None,
    optimize_model: bool = True,
    framework_kwargs: Dict[str, Any] = None,
):
    """
    Convert the given model to an ONNX model.

    :param context:           The MLRun function execution context
    :param model_path:        The model path store object.
    :param load_model_kwargs: Keyword arguments to pass to the `AutoMLRun.load_model` method.
    :param onnx_model_name:   The name to use to log the converted ONNX model. If not given, the given `model_name` will
                              be used with an additional suffix `_onnx`. Defaulted to None.
    :param optimize_model:    Whether to optimize the ONNX model using 'onnxoptimizer' before saving the model.
                              Defaulted to True.
    :param framework_kwargs:  Additional arguments each framework may require to convert to ONNX. To get the doc string
                              of the desired framework onnx conversion function, pass "help".

    :raises MLRunInvalidArgumentError: If the model's framework has no registered ONNX conversion, or if the
                                       framework conversion function rejected the given `framework_kwargs`.
    """
    from mlrun.frameworks.auto_mlrun.auto_mlrun import AutoMLRun

    # Get a model handler of the required framework:
    load_model_kwargs = load_model_kwargs or {}
    model_handler = AutoMLRun.load_model(
        model_path=model_path, context=context, **load_model_kwargs
    )

    # Get the model's framework:
    framework = model_handler.FRAMEWORK_NAME

    # Use the conversion map to get the specific framework to onnx conversion:
    if framework not in _CONVERSION_MAP:
        raise mlrun.errors.MLRunInvalidArgumentError(
            f"The following framework: '{framework}', has no ONNX conversion."
        )
    conversion_function = _CONVERSION_MAP[framework]

    # Check if needed to print the function's doc string ("help" is passed):
    if framework_kwargs == "help":
        print(conversion_function.__doc__)
        return

    # Set the default empty framework kwargs if needed:
    if framework_kwargs is None:
        framework_kwargs = {}

    # Run the conversion:
    try:
        conversion_function(
            model_handler=model_handler,
            onnx_model_name=onnx_model_name,
            optimize_model=optimize_model,
            **framework_kwargs,
        )
    except TypeError as exception:
        # Chain the original TypeError so its traceback (which pinpoints the
        # offending keyword argument) is preserved for the user to inspect:
        raise mlrun.errors.MLRunInvalidArgumentError(
            f"ERROR: A TypeError exception was raised during the conversion:\n{exception}. "
            f"Please read the {framework} framework conversion function doc string by passing 'help' in the "
            f"'framework_kwargs' dictionary parameter."
        ) from exception


def optimize(
    context: mlrun.MLClientCtx,
    model_path: str,
    handler_init_kwargs: dict = None,
    optimizations: List[str] = None,
    fixed_point: bool = False,
    optimized_model_name: str = None,
):
    """
    Optimize the given ONNX model.

    :param context:              The MLRun function execution context.
    :param model_path:           Path to the ONNX model object.
    :param handler_init_kwargs:  Keyword arguments to pass to the `ONNXModelHandler` init method preloading.
    :param optimizations:        List of possible optimizations. To see what optimizations are available, pass "help".
                                 If None, all the optimizations will be used. Defaulted to None.
    :param fixed_point:          Optimize the weights using fixed point. Defaulted to False.
    :param optimized_model_name: The name of the optimized model. If None, the original model will be overridden.
                                 Defaulted to None.
    """
    # Import the optimizer library and the ONNX model handler:
    import onnxoptimizer
    from mlrun.frameworks.onnx import ONNXModelHandler

    # "help" short-circuits the run and just lists the optimizer's passes:
    if optimizations == "help":
        passes_listing = "\n* ".join(onnxoptimizer.get_available_passes())
        print(f"The available optimizations are:\n* {passes_listing}")
        return

    # Initialize a handler over the stored model and load it into memory:
    handler = ONNXModelHandler(
        model_path=model_path, context=context, **(handler_init_kwargs or {})
    )
    handler.load()

    # Apply the requested (or all) optimization passes:
    handler.optimize(optimizations=optimizations, fixed_point=fixed_point)

    # A new name means "log as a separate model" instead of overriding:
    if optimized_model_name is not None:
        handler.set_model_name(model_name=optimized_model_name)

    # Log the optimized model back to MLRun:
    handler.log()

+    base_image: mlrun/mlrun
+    with_mlrun: false
+    auto_build: true
+    requirements:
+    - tqdm~=4.67.1
+    - tensorflow~=2.19.0
+    - tf_keras~=2.19.0
+    - torch~=2.8.0
+    - torchvision~=0.23.0
+    - onnx~=1.17.0
+    - onnxruntime~=1.19.2
+    - onnxoptimizer~=0.3.13
+    - onnxmltools~=1.13.0
+    - tf2onnx~=1.16.1
+    - plotly~=5.23
+    origin_filename: ''
+    code_origin: ''
+verbose: false
+
+        
+    
+ + \ No newline at end of file diff --git a/functions/development/onnx_utils/1.4.0/static/item.html b/functions/development/onnx_utils/1.4.0/static/item.html new file mode 100644 index 00000000..b4662c63 --- /dev/null +++ b/functions/development/onnx_utils/1.4.0/static/item.html @@ -0,0 +1,77 @@ + + + + + + + + + + + Source + + + + +
+        
+apiVersion: v1
+categories:
+- utilities
+- deep-learning
+description: ONNX integration in MLRun, some utils functions for the ONNX framework,
+  optimizing and converting models from different frameworks to ONNX using MLRun.
+doc: ''
+example: onnx_utils.ipynb
+generationDate: 2022-08-28:17-25
+hidden: false
+icon: ''
+labels:
+  author: Iguazio
+maintainers: []
+marketplaceType: ''
+mlrunVersion: 1.10.0
+name: onnx_utils
+platformVersion: 3.5.0
+spec:
+  extra_spec:
+    allow_empty_resources: true
+    build:
+      auto_build: true
+      with_mlrun: false
+  filename: onnx_utils.py
+  handler: to_onnx
+  image: mlrun/mlrun
+  kind: job
+  requirements:
+  - tqdm~=4.67.1
+  - tensorflow~=2.19.0
+  - tf_keras~=2.19.0
+  - torch~=2.8.0
+  - torchvision~=0.23.0
+  - onnx~=1.17.0
+  - onnxruntime~=1.19.2
+  - onnxoptimizer~=0.3.13
+  - onnxmltools~=1.13.0
+  - tf2onnx~=1.16.1
+  - plotly~=5.23
+url: ''
+version: 1.4.0
+
+        
+    
+ + \ No newline at end of file diff --git a/functions/development/onnx_utils/1.4.0/static/onnx_utils.html b/functions/development/onnx_utils/1.4.0/static/onnx_utils.html new file mode 100644 index 00000000..66688879 --- /dev/null +++ b/functions/development/onnx_utils/1.4.0/static/onnx_utils.html @@ -0,0 +1,449 @@ + + + + + + + +onnx_utils.onnx_utils + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + +
+
+
+
+
+
+ +
+ +
+
+
+ + + + +
+
+
+
+
+
+
+
+
+
+
+
+
+ + +
+
+
+
+
+

+ +
+
+
+
+
+ +
+

Source code for onnx_utils.onnx_utils

+# Copyright 2019 Iguazio
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from typing import Any, Callable, Dict, List, Tuple
+
+import mlrun
+
+
+class _ToONNXConversions:
+    """
+    An ONNX conversion functions library class.
+    """
+
+    @staticmethod
+    def tf_keras_to_onnx(
+        model_handler,
+        onnx_model_name: str = None,
+        optimize_model: bool = True,
+        input_signature: List[Tuple[Tuple[int], str]] = None,
+    ):
+        """
+        Convert a TF.Keras model to an ONNX model and log it back to MLRun as a new model object.
+
+        :param model_handler:   An initialized TFKerasModelHandler with a loaded model to convert to ONNX.
+        :param onnx_model_name: The name to use to log the converted ONNX model. If not given, the given `model_name`
+                                will be used with an additional suffix `_onnx`. Defaulted to None.
+        :param optimize_model:  Whether or not to optimize the ONNX model using 'onnxoptimizer' before saving the model.
+                                Defaulted to True.
+        :param input_signature: A list of the input layers shape and data type properties. Expected to receive a list
+                                where each element is an input layer tuple. An input layer tuple is a tuple of:
+                                [0] = Layer's shape, a tuple of integers.
+                                [1] = Layer's data type, a mlrun.data_types.ValueType string.
+                                If None, the input signature will be tried to be read from the model artifact. Defaulted
+                                to None.
+        """
+        # Import the framework and handler:
+        import tensorflow as tf
+        from mlrun.frameworks.tf_keras import TFKerasUtils
+
+        # Check the given 'input_signature' parameter:
+        if input_signature is None:
+            # Read the inputs from the model:
+            try:
+                model_handler.read_inputs_from_model()
+            except Exception as error:
+                raise mlrun.errors.MLRunRuntimeError(
+                    f"Please provide the 'input_signature' parameter. The function tried reading the input layers "
+                    f"information automatically but failed with the following error: {error}"
+                )
+        else:
+            # Parse the 'input_signature' parameter:
+            input_signature = [
+                tf.TensorSpec(
+                    shape=shape,
+                    dtype=TFKerasUtils.convert_value_type_to_tf_dtype(
+                        value_type=value_type
+                    ),
+                )
+                for (shape, value_type) in input_signature
+            ]
+
+        # Convert to ONNX:
+        model_handler.to_onnx(
+            model_name=onnx_model_name,
+            input_signature=input_signature,
+            optimize=optimize_model,
+        )
+
+    @staticmethod
+    def pytorch_to_onnx(
+        model_handler,
+        onnx_model_name: str = None,
+        optimize_model: bool = True,
+        input_signature: List[Tuple[Tuple[int, ...], str]] = None,
+        input_layers_names: List[str] = None,
+        output_layers_names: List[str] = None,
+        dynamic_axes: Dict[str, Dict[int, str]] = None,
+        is_batched: bool = True,
+    ):
+        """
+        Convert a PyTorch model to an ONNX model and log it back to MLRun as a new model object.
+
+        :param model_handler:       An initialized PyTorchModelHandler with a loaded model to convert to ONNX.
+        :param onnx_model_name:     The name to use to log the converted ONNX model. If not given, the given
+                                    `model_name` will be used with an additional suffix `_onnx`. Defaulted to None.
+        :param optimize_model:      Whether or not to optimize the ONNX model using 'onnxoptimizer' before saving the
+                                    model. Defaulted to True.
+        :param input_signature:     A list of the input layers shape and data type properties. Expected to receive a
+                                    list where each element is an input layer tuple. An input layer tuple is a tuple of:
+                                    [0] = Layer's shape, a tuple of integers.
+                                    [1] = Layer's data type, a mlrun.data_types.ValueType string.
+                                    If None, the input signature will be tried to be read from the model artifact.
+                                    Defaulted to None.
+        :param input_layers_names:  List of names to assign to the input nodes of the graph in order. All of the other
+                                    parameters (inner layers) can be set as well by passing additional names in the
+                                    list. The order is by the order of the parameters in the model. If None, the inputs
+                                    will be read from the handler's inputs. If its also None, it is defaulted to:
+                                    "input_0", "input_1", ...
+        :param output_layers_names: List of names to assign to the output nodes of the graph in order. If None, the
+                                    outputs will be read from the handler's outputs. If its also None, it is defaulted
+                                    to: "output_0" (for multiple outputs, this parameter must be provided).
+        :param dynamic_axes:        If part of the input / output shape is dynamic, like (batch_size, 3, 32, 32) you can
+                                    specify it by giving a dynamic axis to the input / output layer by its name as
+                                    follows: {
+                                        "input layer name": {0: "batch_size"},
+                                        "output layer name": {0: "batch_size"},
+                                    }
+                                    If provided, the 'is_batched' flag will be ignored. Defaulted to None.
+        :param is_batched:          Whether to include a batch size as the first axis in every input and output layer.
+                                    Defaulted to True. Will be ignored if 'dynamic_axes' is provided.
+        """
+        # Import the framework and handler:
+        import torch
+        from mlrun.frameworks.pytorch import PyTorchUtils
+
+        # Parse the 'input_signature' parameter:
+        if input_signature is not None:
+            input_signature = tuple(
+                [
+                    torch.zeros(
+                        size=shape,
+                        dtype=PyTorchUtils.convert_value_type_to_torch_dtype(
+                            value_type=value_type
+                        ),
+                    )
+                    for (shape, value_type) in input_signature
+                ]
+            )
+
+        # Convert to ONNX:
+        model_handler.to_onnx(
+            model_name=onnx_model_name,
+            input_sample=input_signature,
+            optimize=optimize_model,
+            input_layers_names=input_layers_names,
+            output_layers_names=output_layers_names,
+            dynamic_axes=dynamic_axes,
+            is_batched=is_batched,
+        )
+
+
+# Map for getting the conversion function according to the provided framework:
+_CONVERSION_MAP = {
+    "tensorflow.keras": _ToONNXConversions.tf_keras_to_onnx,
+    "torch": _ToONNXConversions.pytorch_to_onnx,
+}  # type: Dict[str, Callable]
+
+
+
+[docs] +def to_onnx( + context: mlrun.MLClientCtx, + model_path: str, + load_model_kwargs: dict = None, + onnx_model_name: str = None, + optimize_model: bool = True, + framework_kwargs: Dict[str, Any] = None, +): + """ + Convert the given model to an ONNX model. + + :param context: The MLRun function execution context + :param model_path: The model path store object. + :param load_model_kwargs: Keyword arguments to pass to the `AutoMLRun.load_model` method. + :param onnx_model_name: The name to use to log the converted ONNX model. If not given, the given `model_name` will + be used with an additional suffix `_onnx`. Defaulted to None. + :param optimize_model: Whether to optimize the ONNX model using 'onnxoptimizer' before saving the model. + Defaulted to True. + :param framework_kwargs: Additional arguments each framework may require to convert to ONNX. To get the doc string + of the desired framework onnx conversion function, pass "help". + """ + from mlrun.frameworks.auto_mlrun.auto_mlrun import AutoMLRun + + # Get a model handler of the required framework: + load_model_kwargs = load_model_kwargs or {} + model_handler = AutoMLRun.load_model( + model_path=model_path, context=context, **load_model_kwargs + ) + + # Get the model's framework: + framework = model_handler.FRAMEWORK_NAME + + # Use the conversion map to get the specific framework to onnx conversion: + if framework not in _CONVERSION_MAP: + raise mlrun.errors.MLRunInvalidArgumentError( + f"The following framework: '{framework}', has no ONNX conversion." 
+ ) + conversion_function = _CONVERSION_MAP[framework] + + # Check if needed to print the function's doc string ("help" is passed): + if framework_kwargs == "help": + print(conversion_function.__doc__) + return + + # Set the default empty framework kwargs if needed: + if framework_kwargs is None: + framework_kwargs = {} + + # Run the conversion: + try: + conversion_function( + model_handler=model_handler, + onnx_model_name=onnx_model_name, + optimize_model=optimize_model, + **framework_kwargs, + ) + except TypeError as exception: + raise mlrun.errors.MLRunInvalidArgumentError( + f"ERROR: A TypeError exception was raised during the conversion:\n{exception}. " + f"Please read the {framework} framework conversion function doc string by passing 'help' in the " + f"'framework_kwargs' dictionary parameter." + )
+ + + +
+[docs] +def optimize( + context: mlrun.MLClientCtx, + model_path: str, + handler_init_kwargs: dict = None, + optimizations: List[str] = None, + fixed_point: bool = False, + optimized_model_name: str = None, +): + """ + Optimize the given ONNX model. + + :param context: The MLRun function execution context. + :param model_path: Path to the ONNX model object. + :param handler_init_kwargs: Keyword arguments to pass to the `ONNXModelHandler` init method preloading. + :param optimizations: List of possible optimizations. To see what optimizations are available, pass "help". + If None, all the optimizations will be used. Defaulted to None. + :param fixed_point: Optimize the weights using fixed point. Defaulted to False. + :param optimized_model_name: The name of the optimized model. If None, the original model will be overridden. + Defaulted to None. + """ + # Import the model handler: + import onnxoptimizer + from mlrun.frameworks.onnx import ONNXModelHandler + + # Check if needed to print the available optimizations ("help" is passed): + if optimizations == "help": + available_passes = "\n* ".join(onnxoptimizer.get_available_passes()) + print(f"The available optimizations are:\n* {available_passes}") + return + + # Create the model handler: + handler_init_kwargs = handler_init_kwargs or {} + model_handler = ONNXModelHandler( + model_path=model_path, context=context, **handler_init_kwargs + ) + + # Load the ONNX model: + model_handler.load() + + # Optimize the model using the given configurations: + model_handler.optimize(optimizations=optimizations, fixed_point=fixed_point) + + # Rename if needed: + if optimized_model_name is not None: + model_handler.set_model_name(model_name=optimized_model_name) + + # Log the optimized model: + model_handler.log()
+ +
+
+
+
+
+
+
+
+
+ +
+
+
+
+ + + +
+
+ + \ No newline at end of file diff --git a/functions/development/onnx_utils/1.4.0/static/source.html b/functions/development/onnx_utils/1.4.0/static/source.html new file mode 100644 index 00000000..77715e97 --- /dev/null +++ b/functions/development/onnx_utils/1.4.0/static/source.html @@ -0,0 +1,306 @@ + + + + + + + + + + + Source + + + + +
+        
+# Copyright 2019 Iguazio
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from typing import Any, Callable, Dict, List, Tuple
+
+import mlrun
+
+
+class _ToONNXConversions:
+    """
+    An ONNX conversion functions library class.
+    """
+
+    @staticmethod
+    def tf_keras_to_onnx(
+        model_handler,
+        onnx_model_name: str = None,
+        optimize_model: bool = True,
+        input_signature: List[Tuple[Tuple[int], str]] = None,
+    ):
+        """
+        Convert a TF.Keras model to an ONNX model and log it back to MLRun as a new model object.
+
+        :param model_handler:   An initialized TFKerasModelHandler with a loaded model to convert to ONNX.
+        :param onnx_model_name: The name to use to log the converted ONNX model. If not given, the given `model_name`
+                                will be used with an additional suffix `_onnx`. Defaulted to None.
+        :param optimize_model:  Whether or not to optimize the ONNX model using 'onnxoptimizer' before saving the model.
+                                Defaulted to True.
+        :param input_signature: A list of the input layers shape and data type properties. Expected to receive a list
+                                where each element is an input layer tuple. An input layer tuple is a tuple of:
+                                [0] = Layer's shape, a tuple of integers.
+                                [1] = Layer's data type, a mlrun.data_types.ValueType string.
+                                If None, the input signature will be tried to be read from the model artifact. Defaulted
+                                to None.
+        """
+        # Import the framework and handler:
+        import tensorflow as tf
+        from mlrun.frameworks.tf_keras import TFKerasUtils
+
+        # Check the given 'input_signature' parameter:
+        if input_signature is None:
+            # Read the inputs from the model:
+            try:
+                model_handler.read_inputs_from_model()
+            except Exception as error:
+                raise mlrun.errors.MLRunRuntimeError(
+                    f"Please provide the 'input_signature' parameter. The function tried reading the input layers "
+                    f"information automatically but failed with the following error: {error}"
+                )
+        else:
+            # Parse the 'input_signature' parameter:
+            input_signature = [
+                tf.TensorSpec(
+                    shape=shape,
+                    dtype=TFKerasUtils.convert_value_type_to_tf_dtype(
+                        value_type=value_type
+                    ),
+                )
+                for (shape, value_type) in input_signature
+            ]
+
+        # Convert to ONNX:
+        model_handler.to_onnx(
+            model_name=onnx_model_name,
+            input_signature=input_signature,
+            optimize=optimize_model,
+        )
+
+    @staticmethod
+    def pytorch_to_onnx(
+        model_handler,
+        onnx_model_name: str = None,
+        optimize_model: bool = True,
+        input_signature: List[Tuple[Tuple[int, ...], str]] = None,
+        input_layers_names: List[str] = None,
+        output_layers_names: List[str] = None,
+        dynamic_axes: Dict[str, Dict[int, str]] = None,
+        is_batched: bool = True,
+    ):
+        """
+        Convert a PyTorch model to an ONNX model and log it back to MLRun as a new model object.
+
+        :param model_handler:       An initialized PyTorchModelHandler with a loaded model to convert to ONNX.
+        :param onnx_model_name:     The name to use to log the converted ONNX model. If not given, the given
+                                    `model_name` will be used with an additional suffix `_onnx`. Defaulted to None.
+        :param optimize_model:      Whether or not to optimize the ONNX model using 'onnxoptimizer' before saving the
+                                    model. Defaulted to True.
+        :param input_signature:     A list of the input layers shape and data type properties. Expected to receive a
+                                    list where each element is an input layer tuple. An input layer tuple is a tuple of:
+                                    [0] = Layer's shape, a tuple of integers.
+                                    [1] = Layer's data type, a mlrun.data_types.ValueType string.
+                                    If None, the input signature will be tried to be read from the model artifact.
+                                    Defaulted to None.
+        :param input_layers_names:  List of names to assign to the input nodes of the graph in order. All of the other
+                                    parameters (inner layers) can be set as well by passing additional names in the
+                                    list. The order is by the order of the parameters in the model. If None, the inputs
+                                    will be read from the handler's inputs. If its also None, it is defaulted to:
+                                    "input_0", "input_1", ...
+        :param output_layers_names: List of names to assign to the output nodes of the graph in order. If None, the
+                                    outputs will be read from the handler's outputs. If its also None, it is defaulted
+                                    to: "output_0" (for multiple outputs, this parameter must be provided).
+        :param dynamic_axes:        If part of the input / output shape is dynamic, like (batch_size, 3, 32, 32) you can
+                                    specify it by giving a dynamic axis to the input / output layer by its name as
+                                    follows: {
+                                        "input layer name": {0: "batch_size"},
+                                        "output layer name": {0: "batch_size"},
+                                    }
+                                    If provided, the 'is_batched' flag will be ignored. Defaulted to None.
+        :param is_batched:          Whether to include a batch size as the first axis in every input and output layer.
+                                    Defaulted to True. Will be ignored if 'dynamic_axes' is provided.
+        """
+        # Import the framework and handler:
+        import torch
+        from mlrun.frameworks.pytorch import PyTorchUtils
+
+        # Parse the 'input_signature' parameter:
+        if input_signature is not None:
+            input_signature = tuple(
+                [
+                    torch.zeros(
+                        size=shape,
+                        dtype=PyTorchUtils.convert_value_type_to_torch_dtype(
+                            value_type=value_type
+                        ),
+                    )
+                    for (shape, value_type) in input_signature
+                ]
+            )
+
+        # Convert to ONNX:
+        model_handler.to_onnx(
+            model_name=onnx_model_name,
+            input_sample=input_signature,
+            optimize=optimize_model,
+            input_layers_names=input_layers_names,
+            output_layers_names=output_layers_names,
+            dynamic_axes=dynamic_axes,
+            is_batched=is_batched,
+        )
+
+
+# Map for getting the conversion function according to the provided framework:
+_CONVERSION_MAP = {
+    "tensorflow.keras": _ToONNXConversions.tf_keras_to_onnx,
+    "torch": _ToONNXConversions.pytorch_to_onnx,
+}  # type: Dict[str, Callable]
+
+
+def to_onnx(
+    context: mlrun.MLClientCtx,
+    model_path: str,
+    load_model_kwargs: dict = None,
+    onnx_model_name: str = None,
+    optimize_model: bool = True,
+    framework_kwargs: Dict[str, Any] = None,
+):
+    """
+    Convert the given model to an ONNX model.
+
+    :param context:           The MLRun function execution context
+    :param model_path:        The model path store object.
+    :param load_model_kwargs: Keyword arguments to pass to the `AutoMLRun.load_model` method.
+    :param onnx_model_name:   The name to use to log the converted ONNX model. If not given, the given `model_name` will
+                              be used with an additional suffix `_onnx`. Defaulted to None.
+    :param optimize_model:    Whether to optimize the ONNX model using 'onnxoptimizer' before saving the model.
+                              Defaulted to True.
+    :param framework_kwargs:  Additional arguments each framework may require to convert to ONNX. To get the doc string
+                              of the desired framework onnx conversion function, pass "help".
+    """
+    from mlrun.frameworks.auto_mlrun.auto_mlrun import AutoMLRun
+
+    # Get a model handler of the required framework:
+    load_model_kwargs = load_model_kwargs or {}
+    model_handler = AutoMLRun.load_model(
+        model_path=model_path, context=context, **load_model_kwargs
+    )
+
+    # Get the model's framework:
+    framework = model_handler.FRAMEWORK_NAME
+
+    # Use the conversion map to get the specific framework to onnx conversion:
+    if framework not in _CONVERSION_MAP:
+        raise mlrun.errors.MLRunInvalidArgumentError(
+            f"The following framework: '{framework}', has no ONNX conversion."
+        )
+    conversion_function = _CONVERSION_MAP[framework]
+
+    # Check if needed to print the function's doc string ("help" is passed):
+    if framework_kwargs == "help":
+        print(conversion_function.__doc__)
+        return
+
+    # Set the default empty framework kwargs if needed:
+    if framework_kwargs is None:
+        framework_kwargs = {}
+
+    # Run the conversion:
+    try:
+        conversion_function(
+            model_handler=model_handler,
+            onnx_model_name=onnx_model_name,
+            optimize_model=optimize_model,
+            **framework_kwargs,
+        )
+    except TypeError as exception:
+        raise mlrun.errors.MLRunInvalidArgumentError(
+            f"ERROR: A TypeError exception was raised during the conversion:\n{exception}. "
+            f"Please read the {framework} framework conversion function doc string by passing 'help' in the "
+            f"'framework_kwargs' dictionary parameter."
+        )
+
+
+def optimize(
+    context: mlrun.MLClientCtx,
+    model_path: str,
+    handler_init_kwargs: dict = None,
+    optimizations: List[str] = None,
+    fixed_point: bool = False,
+    optimized_model_name: str = None,
+):
+    """
+    Optimize the given ONNX model.
+
+    :param context:              The MLRun function execution context.
+    :param model_path:           Path to the ONNX model object.
+    :param handler_init_kwargs:  Keyword arguments to pass to the `ONNXModelHandler` init method preloading.
+    :param optimizations:        List of possible optimizations. To see what optimizations are available, pass "help".
+                                 If None, all the optimizations will be used. Defaulted to None.
+    :param fixed_point:          Optimize the weights using fixed point. Defaulted to False.
+    :param optimized_model_name: The name of the optimized model. If None, the original model will be overridden.
+                                 Defaulted to None.
+    """
+    # Import the model handler:
+    import onnxoptimizer
+    from mlrun.frameworks.onnx import ONNXModelHandler
+
+    # Check if needed to print the available optimizations ("help" is passed):
+    if optimizations == "help":
+        available_passes = "\n* ".join(onnxoptimizer.get_available_passes())
+        print(f"The available optimizations are:\n* {available_passes}")
+        return
+
+    # Create the model handler:
+    handler_init_kwargs = handler_init_kwargs or {}
+    model_handler = ONNXModelHandler(
+        model_path=model_path, context=context, **handler_init_kwargs
+    )
+
+    # Load the ONNX model:
+    model_handler.load()
+
+    # Optimize the model using the given configurations:
+    model_handler.optimize(optimizations=optimizations, fixed_point=fixed_point)
+
+    # Rename if needed:
+    if optimized_model_name is not None:
+        model_handler.set_model_name(model_name=optimized_model_name)
+
+    # Log the optimized model:
+    model_handler.log()
+
+        
+    
+ + \ No newline at end of file diff --git a/functions/development/onnx_utils/latest/src/function.yaml b/functions/development/onnx_utils/latest/src/function.yaml index 05a0f0bc..091002cd 100644 --- a/functions/development/onnx_utils/latest/src/function.yaml +++ b/functions/development/onnx_utils/latest/src/function.yaml @@ -1,39 +1,13 @@ -kind: job metadata: + name: onnx-utils + tag: '' categories: - utilities - deep-learning - name: onnx-utils - tag: '' -verbose: false +kind: job spec: - build: - code_origin: '' - base_image: mlrun/mlrun - origin_filename: '' - functionSourceCode: # Copyright 2019 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Callable, Dict, List, Tuple

import mlrun


class _ToONNXConversions:
    """
    An ONNX conversion functions library class. Each static method converts a model of one
    framework to ONNX via its MLRun model handler and logs the result back to MLRun.
    """

    @staticmethod
    def tf_keras_to_onnx(
        model_handler,
        onnx_model_name: str = None,
        optimize_model: bool = True,
        input_signature: List[Tuple[Tuple[int], str]] = None,
    ):
        """
        Convert a TF.Keras model to an ONNX model and log it back to MLRun as a new model object.

        :param model_handler:   An initialized TFKerasModelHandler with a loaded model to convert to ONNX.
        :param onnx_model_name: The name to use to log the converted ONNX model. If not given, the given `model_name`
                                will be used with an additional suffix `_onnx`. Defaulted to None.
        :param optimize_model:  Whether or not to optimize the ONNX model using 'onnxoptimizer' before saving the model.
                                Defaulted to True.
        :param input_signature: A list of the input layers shape and data type properties. Expected to receive a list
                                where each element is an input layer tuple. An input layer tuple is a tuple of:
                                [0] = Layer's shape, a tuple of integers.
                                [1] = Layer's data type, a mlrun.data_types.ValueType string.
                                If None, the input signature will be tried to be read from the model artifact. Defaulted
                                to None.

        :raises MLRunRuntimeError: If `input_signature` is None and it could not be read from the model artifact.
        """
        # Import the framework and handler (local import so the module loads without TensorFlow installed):
        import tensorflow as tf
        from mlrun.frameworks.tf_keras import TFKerasUtils

        # Check the given 'input_signature' parameter:
        if input_signature is None:
            # Read the inputs from the model:
            try:
                model_handler.read_inputs_from_model()
            except Exception as error:
                # Chain the original exception so the root cause stays in the traceback:
                raise mlrun.errors.MLRunRuntimeError(
                    f"Please provide the 'input_signature' parameter. The function tried reading the input layers "
                    f"information automatically but failed with the following error: {error}"
                ) from error
        else:
            # Parse the 'input_signature' parameter into TensorFlow tensor specs:
            input_signature = [
                tf.TensorSpec(
                    shape=shape,
                    dtype=TFKerasUtils.convert_value_type_to_tf_dtype(
                        value_type=value_type
                    ),
                )
                for (shape, value_type) in input_signature
            ]

        # Convert to ONNX:
        model_handler.to_onnx(
            model_name=onnx_model_name,
            input_signature=input_signature,
            optimize=optimize_model,
        )

    @staticmethod
    def pytorch_to_onnx(
        model_handler,
        onnx_model_name: str = None,
        optimize_model: bool = True,
        input_signature: List[Tuple[Tuple[int, ...], str]] = None,
        input_layers_names: List[str] = None,
        output_layers_names: List[str] = None,
        dynamic_axes: Dict[str, Dict[int, str]] = None,
        is_batched: bool = True,
    ):
        """
        Convert a PyTorch model to an ONNX model and log it back to MLRun as a new model object.

        :param model_handler:       An initialized PyTorchModelHandler with a loaded model to convert to ONNX.
        :param onnx_model_name:     The name to use to log the converted ONNX model. If not given, the given
                                    `model_name` will be used with an additional suffix `_onnx`. Defaulted to None.
        :param optimize_model:      Whether or not to optimize the ONNX model using 'onnxoptimizer' before saving the
                                    model. Defaulted to True.
        :param input_signature:     A list of the input layers shape and data type properties. Expected to receive a
                                    list where each element is an input layer tuple. An input layer tuple is a tuple of:
                                    [0] = Layer's shape, a tuple of integers.
                                    [1] = Layer's data type, a mlrun.data_types.ValueType string.
                                    If None, the input signature will be tried to be read from the model artifact.
                                    Defaulted to None.
        :param input_layers_names:  List of names to assign to the input nodes of the graph in order. All of the other
                                    parameters (inner layers) can be set as well by passing additional names in the
                                    list. The order is by the order of the parameters in the model. If None, the inputs
                                    will be read from the handler's inputs. If its also None, it is defaulted to:
                                    "input_0", "input_1", ...
        :param output_layers_names: List of names to assign to the output nodes of the graph in order. If None, the
                                    outputs will be read from the handler's outputs. If its also None, it is defaulted
                                    to: "output_0" (for multiple outputs, this parameter must be provided).
        :param dynamic_axes:        If part of the input / output shape is dynamic, like (batch_size, 3, 32, 32) you can
                                    specify it by giving a dynamic axis to the input / output layer by its name as
                                    follows: {
                                        "input layer name": {0: "batch_size"},
                                        "output layer name": {0: "batch_size"},
                                    }
                                    If provided, the 'is_batched' flag will be ignored. Defaulted to None.
        :param is_batched:          Whether to include a batch size as the first axis in every input and output layer.
                                    Defaulted to True. Will be ignored if 'dynamic_axes' is provided.
        """
        # Import the framework and handler (local import so the module loads without PyTorch installed):
        import torch
        from mlrun.frameworks.pytorch import PyTorchUtils

        # Parse the 'input_signature' parameter into zeroed sample tensors for tracing:
        if input_signature is not None:
            input_signature = tuple(
                torch.zeros(
                    size=shape,
                    dtype=PyTorchUtils.convert_value_type_to_torch_dtype(
                        value_type=value_type
                    ),
                )
                for (shape, value_type) in input_signature
            )

        # Convert to ONNX:
        model_handler.to_onnx(
            model_name=onnx_model_name,
            input_sample=input_signature,
            optimize=optimize_model,
            input_layers_names=input_layers_names,
            output_layers_names=output_layers_names,
            dynamic_axes=dynamic_axes,
            is_batched=is_batched,
        )


# Maps a framework name (as reported by the model handler's `FRAMEWORK_NAME`) to its
# ONNX conversion function:
_CONVERSION_MAP: Dict[str, Callable] = {
    "tensorflow.keras": _ToONNXConversions.tf_keras_to_onnx,
    "torch": _ToONNXConversions.pytorch_to_onnx,
}


def to_onnx(
    context: mlrun.MLClientCtx,
    model_path: str,
    load_model_kwargs: dict = None,
    onnx_model_name: str = None,
    optimize_model: bool = True,
    framework_kwargs: Dict[str, Any] = None,
):
    """
    Convert the given model to an ONNX model.

    :param context:           The MLRun function execution context
    :param model_path:        The model path store object.
    :param load_model_kwargs: Keyword arguments to pass to the `AutoMLRun.load_model` method.
    :param onnx_model_name:   The name to use to log the converted ONNX model. If not given, the given `model_name` will
                              be used with an additional suffix `_onnx`. Defaulted to None.
    :param optimize_model:    Whether to optimize the ONNX model using 'onnxoptimizer' before saving the model.
                              Defaulted to True.
    :param framework_kwargs:  Additional arguments each framework may require to convert to ONNX. To get the doc string
                              of the desired framework onnx conversion function, pass "help".

    :raises MLRunInvalidArgumentError: If the model's framework has no ONNX conversion, or if the conversion function
                                       was called with invalid keyword arguments.
    """
    from mlrun.frameworks.auto_mlrun.auto_mlrun import AutoMLRun

    # Get a model handler of the required framework:
    load_model_kwargs = load_model_kwargs or {}
    model_handler = AutoMLRun.load_model(
        model_path=model_path, context=context, **load_model_kwargs
    )

    # Get the model's framework:
    framework = model_handler.FRAMEWORK_NAME

    # Use the conversion map to get the specific framework to onnx conversion:
    if framework not in _CONVERSION_MAP:
        raise mlrun.errors.MLRunInvalidArgumentError(
            f"The following framework: '{framework}', has no ONNX conversion."
        )
    conversion_function = _CONVERSION_MAP[framework]

    # Check if needed to print the function's doc string ("help" is passed):
    if framework_kwargs == "help":
        print(conversion_function.__doc__)
        return

    # Set the default empty framework kwargs if needed:
    if framework_kwargs is None:
        framework_kwargs = {}

    # Run the conversion:
    try:
        conversion_function(
            model_handler=model_handler,
            onnx_model_name=onnx_model_name,
            optimize_model=optimize_model,
            **framework_kwargs,
        )
    except TypeError as exception:
        # Chain the TypeError so the offending argument stays visible in the traceback:
        raise mlrun.errors.MLRunInvalidArgumentError(
            f"ERROR: A TypeError exception was raised during the conversion:\n{exception}. "
            f"Please read the {framework} framework conversion function doc string by passing 'help' in the "
            f"'framework_kwargs' dictionary parameter."
        ) from exception


def optimize(
    context: mlrun.MLClientCtx,
    model_path: str,
    handler_init_kwargs: dict = None,
    optimizations: List[str] = None,
    fixed_point: bool = False,
    optimized_model_name: str = None,
):
    """
    Optimize the given ONNX model.

    :param context:              The MLRun function execution context.
    :param model_path:           Path to the ONNX model object.
    :param handler_init_kwargs:  Keyword arguments to pass to the `ONNXModelHandler` init method preloading.
    :param optimizations:        List of possible optimizations. To see what optimizations are available, pass "help".
                                 If None, all the optimizations will be used. Defaulted to None.
    :param fixed_point:          Optimize the weights using fixed point. Defaulted to False.
    :param optimized_model_name: The name of the optimized model. If None, the original model will be overridden.
                                 Defaulted to None.
    """
    # Local imports so the module loads without the ONNX stack installed:
    import onnxoptimizer
    from mlrun.frameworks.onnx import ONNXModelHandler

    # Passing "help" only lists the available optimization passes, nothing is optimized:
    if optimizations == "help":
        available_passes = "\n* ".join(onnxoptimizer.get_available_passes())
        print(f"The available optimizations are:\n* {available_passes}")
        return

    # Build the handler and bring the ONNX model into memory:
    if handler_init_kwargs is None:
        handler_init_kwargs = {}
    handler = ONNXModelHandler(
        model_path=model_path, context=context, **handler_init_kwargs
    )
    handler.load()

    # Apply the requested optimization passes:
    handler.optimize(optimizations=optimizations, fixed_point=fixed_point)

    # A new name means "log as a new model"; otherwise the original model is overridden:
    if optimized_model_name is not None:
        handler.set_model_name(model_name=optimized_model_name)

    # Log the optimized model back to MLRun:
    handler.log()
 - requirements: - - tqdm~=4.67.1 - - tensorflow~=2.19.0 - - tf_keras~=2.19.0 - - torch~=2.6.0 - - torchvision~=0.21.0 - - onnx~=1.17.0 - - onnxruntime~=1.19.2 - - onnxoptimizer~=0.3.13 - - onnxmltools~=1.13.0 - - tf2onnx~=1.16.1 - - plotly~=5.23 - with_mlrun: false - auto_build: true - disable_auto_mount: false - description: ONNX intigration in MLRun, some utils functions for the ONNX framework, - optimizing and converting models from different framework to ONNX using MLRun. - image: '' entry_points: tf_keras_to_onnx: - doc: Convert a TF.Keras model to an ONNX model and log it back to MLRun as a - new model object. name: tf_keras_to_onnx parameters: - name: model_handler @@ -58,12 +32,12 @@ spec: data type, a mlrun.data_types.ValueType string. If None, the input signature will be tried to be read from the model artifact. Defaulted to None.' default: null + doc: Convert a TF.Keras model to an ONNX model and log it back to MLRun as a + new model object. + lineno: 26 has_varargs: false has_kwargs: false - lineno: 26 pytorch_to_onnx: - doc: Convert a PyTorch model to an ONNX model and log it back to MLRun as a - new model object. name: pytorch_to_onnx parameters: - name: model_handler @@ -116,11 +90,12 @@ spec: doc: Whether to include a batch size as the first axis in every input and output layer. Defaulted to True. Will be ignored if 'dynamic_axes' is provided. default: true + doc: Convert a PyTorch model to an ONNX model and log it back to MLRun as a + new model object. + lineno: 81 has_varargs: false has_kwargs: false - lineno: 81 to_onnx: - doc: Convert the given model to an ONNX model. name: to_onnx parameters: - name: context @@ -150,11 +125,11 @@ spec: get the doc string of the desired framework onnx conversion function, pass "help". default: null + doc: Convert the given model to an ONNX model. + lineno: 160 has_varargs: false has_kwargs: false - lineno: 160 optimize: - doc: Optimize the given ONNX model. 
name: optimize parameters: - name: context @@ -181,9 +156,34 @@ spec: doc: The name of the optimized model. If None, the original model will be overridden. Defaulted to None. default: null + doc: Optimize the given ONNX model. + lineno: 224 has_varargs: false has_kwargs: false - lineno: 224 + image: '' default_handler: to_onnx allow_empty_resources: true command: '' + disable_auto_mount: false + description: ONNX intigration in MLRun, some utils functions for the ONNX framework, + optimizing and converting models from different framework to ONNX using MLRun. + build: + functionSourceCode: # Copyright 2019 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Callable, Dict, List, Tuple

import mlrun


class _ToONNXConversions:
    """
    An ONNX conversion functions library class. Each static method converts a model of one
    framework to ONNX via its MLRun model handler and logs the result back to MLRun.
    """

    @staticmethod
    def tf_keras_to_onnx(
        model_handler,
        onnx_model_name: str = None,
        optimize_model: bool = True,
        input_signature: List[Tuple[Tuple[int], str]] = None,
    ):
        """
        Convert a TF.Keras model to an ONNX model and log it back to MLRun as a new model object.

        :param model_handler:   An initialized TFKerasModelHandler with a loaded model to convert to ONNX.
        :param onnx_model_name: The name to use to log the converted ONNX model. If not given, the given `model_name`
                                will be used with an additional suffix `_onnx`. Defaulted to None.
        :param optimize_model:  Whether or not to optimize the ONNX model using 'onnxoptimizer' before saving the model.
                                Defaulted to True.
        :param input_signature: A list of the input layers shape and data type properties. Expected to receive a list
                                where each element is an input layer tuple. An input layer tuple is a tuple of:
                                [0] = Layer's shape, a tuple of integers.
                                [1] = Layer's data type, a mlrun.data_types.ValueType string.
                                If None, the input signature will be tried to be read from the model artifact. Defaulted
                                to None.

        :raises MLRunRuntimeError: If `input_signature` is None and it could not be read from the model artifact.
        """
        # Import the framework and handler (local import so the module loads without TensorFlow installed):
        import tensorflow as tf
        from mlrun.frameworks.tf_keras import TFKerasUtils

        # Check the given 'input_signature' parameter:
        if input_signature is None:
            # Read the inputs from the model:
            try:
                model_handler.read_inputs_from_model()
            except Exception as error:
                # Chain the original exception so the root cause stays in the traceback:
                raise mlrun.errors.MLRunRuntimeError(
                    f"Please provide the 'input_signature' parameter. The function tried reading the input layers "
                    f"information automatically but failed with the following error: {error}"
                ) from error
        else:
            # Parse the 'input_signature' parameter into TensorFlow tensor specs:
            input_signature = [
                tf.TensorSpec(
                    shape=shape,
                    dtype=TFKerasUtils.convert_value_type_to_tf_dtype(
                        value_type=value_type
                    ),
                )
                for (shape, value_type) in input_signature
            ]

        # Convert to ONNX:
        model_handler.to_onnx(
            model_name=onnx_model_name,
            input_signature=input_signature,
            optimize=optimize_model,
        )

    @staticmethod
    def pytorch_to_onnx(
        model_handler,
        onnx_model_name: str = None,
        optimize_model: bool = True,
        input_signature: List[Tuple[Tuple[int, ...], str]] = None,
        input_layers_names: List[str] = None,
        output_layers_names: List[str] = None,
        dynamic_axes: Dict[str, Dict[int, str]] = None,
        is_batched: bool = True,
    ):
        """
        Convert a PyTorch model to an ONNX model and log it back to MLRun as a new model object.

        :param model_handler:       An initialized PyTorchModelHandler with a loaded model to convert to ONNX.
        :param onnx_model_name:     The name to use to log the converted ONNX model. If not given, the given
                                    `model_name` will be used with an additional suffix `_onnx`. Defaulted to None.
        :param optimize_model:      Whether or not to optimize the ONNX model using 'onnxoptimizer' before saving the
                                    model. Defaulted to True.
        :param input_signature:     A list of the input layers shape and data type properties. Expected to receive a
                                    list where each element is an input layer tuple. An input layer tuple is a tuple of:
                                    [0] = Layer's shape, a tuple of integers.
                                    [1] = Layer's data type, a mlrun.data_types.ValueType string.
                                    If None, the input signature will be tried to be read from the model artifact.
                                    Defaulted to None.
        :param input_layers_names:  List of names to assign to the input nodes of the graph in order. All of the other
                                    parameters (inner layers) can be set as well by passing additional names in the
                                    list. The order is by the order of the parameters in the model. If None, the inputs
                                    will be read from the handler's inputs. If its also None, it is defaulted to:
                                    "input_0", "input_1", ...
        :param output_layers_names: List of names to assign to the output nodes of the graph in order. If None, the
                                    outputs will be read from the handler's outputs. If its also None, it is defaulted
                                    to: "output_0" (for multiple outputs, this parameter must be provided).
        :param dynamic_axes:        If part of the input / output shape is dynamic, like (batch_size, 3, 32, 32) you can
                                    specify it by giving a dynamic axis to the input / output layer by its name as
                                    follows: {
                                        "input layer name": {0: "batch_size"},
                                        "output layer name": {0: "batch_size"},
                                    }
                                    If provided, the 'is_batched' flag will be ignored. Defaulted to None.
        :param is_batched:          Whether to include a batch size as the first axis in every input and output layer.
                                    Defaulted to True. Will be ignored if 'dynamic_axes' is provided.
        """
        # Import the framework and handler (local import so the module loads without PyTorch installed):
        import torch
        from mlrun.frameworks.pytorch import PyTorchUtils

        # Parse the 'input_signature' parameter into zeroed sample tensors for tracing:
        if input_signature is not None:
            input_signature = tuple(
                torch.zeros(
                    size=shape,
                    dtype=PyTorchUtils.convert_value_type_to_torch_dtype(
                        value_type=value_type
                    ),
                )
                for (shape, value_type) in input_signature
            )

        # Convert to ONNX:
        model_handler.to_onnx(
            model_name=onnx_model_name,
            input_sample=input_signature,
            optimize=optimize_model,
            input_layers_names=input_layers_names,
            output_layers_names=output_layers_names,
            dynamic_axes=dynamic_axes,
            is_batched=is_batched,
        )


# Maps a framework name (as reported by the model handler's `FRAMEWORK_NAME`) to its
# ONNX conversion function:
_CONVERSION_MAP: Dict[str, Callable] = {
    "tensorflow.keras": _ToONNXConversions.tf_keras_to_onnx,
    "torch": _ToONNXConversions.pytorch_to_onnx,
}


def to_onnx(
    context: mlrun.MLClientCtx,
    model_path: str,
    load_model_kwargs: dict = None,
    onnx_model_name: str = None,
    optimize_model: bool = True,
    framework_kwargs: Dict[str, Any] = None,
):
    """
    Convert the given model to an ONNX model.

    :param context:           The MLRun function execution context
    :param model_path:        The model path store object.
    :param load_model_kwargs: Keyword arguments to pass to the `AutoMLRun.load_model` method.
    :param onnx_model_name:   The name to use to log the converted ONNX model. If not given, the given `model_name` will
                              be used with an additional suffix `_onnx`. Defaulted to None.
    :param optimize_model:    Whether to optimize the ONNX model using 'onnxoptimizer' before saving the model.
                              Defaulted to True.
    :param framework_kwargs:  Additional arguments each framework may require to convert to ONNX. To get the doc string
                              of the desired framework onnx conversion function, pass "help".

    :raises MLRunInvalidArgumentError: If the model's framework has no ONNX conversion, or if the conversion function
                                       was called with invalid keyword arguments.
    """
    from mlrun.frameworks.auto_mlrun.auto_mlrun import AutoMLRun

    # Get a model handler of the required framework:
    load_model_kwargs = load_model_kwargs or {}
    model_handler = AutoMLRun.load_model(
        model_path=model_path, context=context, **load_model_kwargs
    )

    # Get the model's framework:
    framework = model_handler.FRAMEWORK_NAME

    # Use the conversion map to get the specific framework to onnx conversion:
    if framework not in _CONVERSION_MAP:
        raise mlrun.errors.MLRunInvalidArgumentError(
            f"The following framework: '{framework}', has no ONNX conversion."
        )
    conversion_function = _CONVERSION_MAP[framework]

    # Check if needed to print the function's doc string ("help" is passed):
    if framework_kwargs == "help":
        print(conversion_function.__doc__)
        return

    # Set the default empty framework kwargs if needed:
    if framework_kwargs is None:
        framework_kwargs = {}

    # Run the conversion:
    try:
        conversion_function(
            model_handler=model_handler,
            onnx_model_name=onnx_model_name,
            optimize_model=optimize_model,
            **framework_kwargs,
        )
    except TypeError as exception:
        # Chain the TypeError so the offending argument stays visible in the traceback:
        raise mlrun.errors.MLRunInvalidArgumentError(
            f"ERROR: A TypeError exception was raised during the conversion:\n{exception}. "
            f"Please read the {framework} framework conversion function doc string by passing 'help' in the "
            f"'framework_kwargs' dictionary parameter."
        ) from exception


def optimize(
    context: mlrun.MLClientCtx,
    model_path: str,
    handler_init_kwargs: dict = None,
    optimizations: List[str] = None,
    fixed_point: bool = False,
    optimized_model_name: str = None,
):
    """
    Optimize the given ONNX model.

    :param context:              The MLRun function execution context.
    :param model_path:           Path to the ONNX model object.
    :param handler_init_kwargs:  Keyword arguments to pass to the `ONNXModelHandler` init method preloading.
    :param optimizations:        List of possible optimizations. To see what optimizations are available, pass "help".
                                 If None, all the optimizations will be used. Defaulted to None.
    :param fixed_point:          Optimize the weights using fixed point. Defaulted to False.
    :param optimized_model_name: The name of the optimized model. If None, the original model will be overridden.
                                 Defaulted to None.
    """
    # Local imports so the module loads without the ONNX stack installed:
    import onnxoptimizer
    from mlrun.frameworks.onnx import ONNXModelHandler

    # Passing "help" only lists the available optimization passes, nothing is optimized:
    if optimizations == "help":
        available_passes = "\n* ".join(onnxoptimizer.get_available_passes())
        print(f"The available optimizations are:\n* {available_passes}")
        return

    # Build the handler and bring the ONNX model into memory:
    if handler_init_kwargs is None:
        handler_init_kwargs = {}
    handler = ONNXModelHandler(
        model_path=model_path, context=context, **handler_init_kwargs
    )
    handler.load()

    # Apply the requested optimization passes:
    handler.optimize(optimizations=optimizations, fixed_point=fixed_point)

    # A new name means "log as a new model"; otherwise the original model is overridden:
    if optimized_model_name is not None:
        handler.set_model_name(model_name=optimized_model_name)

    # Log the optimized model back to MLRun:
    handler.log()
 + base_image: mlrun/mlrun + with_mlrun: false + auto_build: true + requirements: + - tqdm~=4.67.1 + - tensorflow~=2.19.0 + - tf_keras~=2.19.0 + - torch~=2.8.0 + - torchvision~=0.23.0 + - onnx~=1.17.0 + - onnxruntime~=1.19.2 + - onnxoptimizer~=0.3.13 + - onnxmltools~=1.13.0 + - tf2onnx~=1.16.1 + - plotly~=5.23 + origin_filename: '' + code_origin: '' +verbose: false diff --git a/functions/development/onnx_utils/latest/src/item.yaml b/functions/development/onnx_utils/latest/src/item.yaml index 803bd259..5f129389 100644 --- a/functions/development/onnx_utils/latest/src/item.yaml +++ b/functions/development/onnx_utils/latest/src/item.yaml @@ -13,7 +13,7 @@ labels: author: Iguazio maintainers: [] marketplaceType: '' -mlrunVersion: 1.7.2 +mlrunVersion: 1.10.0 name: onnx_utils platformVersion: 3.5.0 spec: @@ -30,8 +30,8 @@ spec: - tqdm~=4.67.1 - tensorflow~=2.19.0 - tf_keras~=2.19.0 - - torch~=2.6.0 - - torchvision~=0.21.0 + - torch~=2.8.0 + - torchvision~=0.23.0 - onnx~=1.17.0 - onnxruntime~=1.19.2 - onnxoptimizer~=0.3.13 @@ -39,4 +39,4 @@ spec: - tf2onnx~=1.16.1 - plotly~=5.23 url: '' -version: 1.3.0 +version: 1.4.0 diff --git a/functions/development/onnx_utils/latest/src/onnx_utils.ipynb b/functions/development/onnx_utils/latest/src/onnx_utils.ipynb index 78203a45..14c810fa 100644 --- a/functions/development/onnx_utils/latest/src/onnx_utils.ipynb +++ b/functions/development/onnx_utils/latest/src/onnx_utils.ipynb @@ -77,9 +77,9 @@ "source": [ "### 1.2. Demo\n", "\n", - "We will use the `TF.Keras` framework, a `MobileNetV2` as our model and we will convert it to ONNX using the `to_onnx` handler.\n", + "We will use the `PyTorch` framework, a `MobileNetV2` as our model and we will convert it to ONNX using the `to_onnx` handler.\n", "\n", - "1.2.1. First we will set a temporary artifact path for our model to be saved in and choose the models names:" + "1.2.1. 
First we will set the artifact path for our model to be saved in and choose the models names:" ] }, { @@ -87,16 +87,21 @@ "metadata": { "pycharm": { "name": "#%%\n" + }, + "ExecuteTime": { + "end_time": "2026-02-10T14:13:28.256582Z", + "start_time": "2026-02-10T14:13:28.250886Z" } }, "source": [ "import os\n", - "os.environ[\"TF_USE_LEGACY_KERAS\"] = \"true\"\n", - "from tempfile import TemporaryDirectory\n", + "import tempfile\n", + "# Use a temporary directory for model artifacts (safe cleanup):\n", + "ARTIFACT_PATH = tempfile.mkdtemp()\n", + "os.environ[\"MLRUN_ARTIFACT_PATH\"] = ARTIFACT_PATH\n", "\n", - "# Create a temporary directory for the model artifact:\n", - "ARTIFACT_PATH = TemporaryDirectory().name\n", - "os.makedirs(ARTIFACT_PATH)\n", + "# Project name:\n", + "PROJECT_NAME = \"onnx-utils\"\n", "\n", "# Choose our model's name:\n", "MODEL_NAME = \"mobilenetv2\"\n", @@ -108,7 +113,7 @@ "OPTIMIZED_ONNX_MODEL_NAME = \"optimized_onnx_mobilenetv2\"" ], "outputs": [], - "execution_count": null + "execution_count": 1 }, { "cell_type": "markdown", @@ -118,87 +123,88 @@ } }, "source": [ - "1.2.2. Download the model from `keras.applications` and log it with MLRun's `TFKerasModelHandler`:" + "1.2.2. 
Download the model from `torchvision.models` and log it with MLRun's `PyTorchModelHandler`:" ] }, { - "cell_type": "code", "metadata": { - "pycharm": { - "name": "#%%\n" + "ExecuteTime": { + "end_time": "2026-02-10T14:00:15.032590Z", + "start_time": "2026-02-10T14:00:15.031196Z" } }, - "source": [ - "# mlrun: start-code" - ], + "cell_type": "code", + "source": "# mlrun: start-code", "outputs": [], - "execution_count": null + "execution_count": 8 }, { + "metadata": { + "ExecuteTime": { + "end_time": "2026-02-10T14:14:00.992001Z", + "start_time": "2026-02-10T14:13:33.115438Z" + } + }, "cell_type": "code", - "metadata": {}, "source": [ - "from tensorflow import keras\n", + "import torchvision\n", "\n", "import mlrun\n", - "import mlrun.frameworks.tf_keras as mlrun_tf_keras\n", + "from mlrun.frameworks.pytorch import PyTorchModelHandler\n", "\n", "\n", "def get_model(context: mlrun.MLClientCtx, model_name: str):\n", " # Download the MobileNetV2 model:\n", - " model = keras.applications.mobilenet_v2.MobileNetV2()\n", + " model = torchvision.models.mobilenet_v2()\n", "\n", " # Initialize a model handler for logging the model:\n", - " model_handler = mlrun_tf_keras.TFKerasModelHandler(\n", + " model_handler = PyTorchModelHandler(\n", " model_name=model_name,\n", " model=model,\n", - " context=context\n", + " model_class=\"mobilenet_v2\",\n", + " modules_map={\"torchvision.models\": \"mobilenet_v2\"},\n", + " context=context,\n", " )\n", "\n", " # Log the model:\n", " model_handler.log()" ], "outputs": [], - "execution_count": null + "execution_count": 2 }, { - "cell_type": "code", "metadata": { - "pycharm": { - "name": "#%%\n" + "ExecuteTime": { + "end_time": "2026-02-10T14:00:15.040221Z", + "start_time": "2026-02-10T14:00:15.038886Z" } }, - "source": [ - "# mlrun: end-code" - ], + "cell_type": "code", + "source": "# mlrun: end-code", "outputs": [], - "execution_count": null - }, - { - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - 
"source": [ - "1.2.3. Create the function using MLRun's `code_to_function` and run it:" - ] + "execution_count": 10 }, { "cell_type": "code", "metadata": { "pycharm": { "name": "#%%\n" + }, + "ExecuteTime": { + "end_time": "2026-02-10T14:14:34.429194Z", + "start_time": "2026-02-10T14:14:07.906087Z" } }, "source": [ "import mlrun\n", "\n", + "# Create or get the MLRun project:\n", + "project = mlrun.get_or_create_project(PROJECT_NAME, context=\"./\")\n", "\n", "# Create the function parsing this notebook's code using 'code_to_function':\n", "get_model_function = mlrun.code_to_function(\n", " name=\"get_mobilenetv2\",\n", + " project=PROJECT_NAME,\n", " kind=\"job\",\n", " image=\"mlrun/ml-models\"\n", ")\n", @@ -206,15 +212,267 @@ "# Run the function to log the model:\n", "get_model_run = get_model_function.run(\n", " handler=\"get_model\",\n", - " artifact_path=ARTIFACT_PATH,\n", + " output_path=ARTIFACT_PATH,\n", " params={\n", " \"model_name\": MODEL_NAME\n", " },\n", " local=True\n", ")" ], - "outputs": [], - "execution_count": null + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "> 2026-02-10 16:14:24,932 [info] Created and saved project: {\"context\":\"./\",\"from_template\":null,\"name\":\"onnx-utils\",\"overwrite\":false,\"save\":true}\n", + "> 2026-02-10 16:14:24,933 [info] Project created successfully: {\"project_name\":\"onnx-utils\",\"stored_in_db\":true}\n", + "> 2026-02-10 16:14:31,659 [info] Storing function: {\"db\":null,\"name\":\"get-mobilenetv2-get-model\",\"uid\":\"7b9d1b54375b44e191d73685a382c910\"}\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ], + "text/html": [ + "\n", + "
\n", + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
projectuiditerstartendstatekindnamelabelsinputsparametersresultsartifact_uris
onnx-utils0Feb 10 14:14:32NaTcompletedrunget-mobilenetv2-get-model
v3io_user=omerm
kind=local
owner=omerm
host=M-KCX16N69X3
model_name=mobilenetv2
mobilenetv2_modules_map.json=store://artifacts/onnx-utils/#0@7b9d1b54375b44e191d73685a382c910
model=store://models/onnx-utils/mobilenetv2#0@7b9d1b54375b44e191d73685a382c910^e0393bc5b070fd55cc57cecb94160ce412498e0f
\n", + "
\n", + "
\n", + "
\n", + " Title\n", + " ×\n", + "
\n", + " \n", + "
\n", + "
\n" + ] + }, + "metadata": {}, + "output_type": "display_data", + "jetTransient": { + "display_id": null + } + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ], + "text/html": [ + " > to track results use the .show() or .logs() methods or click here to open in UI" + ] + }, + "metadata": {}, + "output_type": "display_data", + "jetTransient": { + "display_id": null + } + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "> 2026-02-10 16:14:34,427 [info] Run execution finished: {\"name\":\"get-mobilenetv2-get-model\",\"status\":\"completed\"}\n" + ] + } + ], + "execution_count": 3 }, { "cell_type": "markdown", @@ -228,33 +486,271 @@ "metadata": { "pycharm": { "name": "#%%\n" + }, + "ExecuteTime": { + "end_time": "2026-02-10T14:14:53.863947Z", + "start_time": "2026-02-10T14:14:48.088349Z" } }, - "source": [ - "# Import the ONNX function from the marketplace:\n", - "onnx_utils_function = mlrun.import_function(\"hub://onnx_utils\")\n", - "\n", - "# Run the function to convert our model to ONNX:\n", - "to_onnx_run = onnx_utils_function.run(\n", - " handler=\"to_onnx\",\n", - " artifact_path=ARTIFACT_PATH,\n", - " params={\n", - " \"model_name\": MODEL_NAME,\n", - " \"model_path\": get_model_run.outputs[MODEL_NAME], # <- Take the logged model from the previous function.\n", - " \"onnx_model_name\": ONNX_MODEL_NAME,\n", - " \"optimize_model\": False # <- For optimizing it later in the demo, we mark the flag as False\n", - " },\n", - " local=True\n", - ")" + "source": "# Import the ONNX function from the marketplace:\nonnx_utils_function = mlrun.import_function(\"hub://onnx_utils\", project=PROJECT_NAME)\n\n# Construct the model path from the run directory structure:\nmodel_path = os.path.join(ARTIFACT_PATH, \"get-mobilenetv2-get-model\", \"0\", \"model\")\nmodules_map_path = os.path.join(ARTIFACT_PATH, \"get-mobilenetv2-get-model\", \"0\", 
\"mobilenetv2_modules_map.json.json\")\n\n# Run the function to convert our model to ONNX:\nto_onnx_run = onnx_utils_function.run(\n handler=\"to_onnx\",\n output_path=ARTIFACT_PATH,\n params={\n \"model_name\": MODEL_NAME,\n \"model_path\": model_path,\n \"load_model_kwargs\": {\n \"model_name\": MODEL_NAME,\n \"model_class\": \"mobilenet_v2\",\n \"modules_map\": modules_map_path,\n },\n \"onnx_model_name\": ONNX_MODEL_NAME,\n \"optimize_model\": False, # <- For optimizing it later in the demo, we mark the flag as False\n \"framework_kwargs\": {\"input_signature\": [((32, 3, 224, 224), \"float32\")]},\n },\n local=True\n)", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "> 2026-02-10 16:14:48,519 [info] Storing function: {\"db\":null,\"name\":\"onnx-utils-to-onnx\",\"uid\":\"95deb2c7dbf0460291efb25c48eeebd7\"}\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ], + "text/html": [ + "\n", + "
\n", + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
projectuiditerstartendstatekindnamelabelsinputsparametersresultsartifact_uris
onnx-utils0Feb 10 14:14:49NaTcompletedrunonnx-utils-to-onnx
v3io_user=omerm
kind=local
owner=omerm
host=M-KCX16N69X3
model_name=mobilenetv2
model_path=/var/folders/rn/q8gs952n26982d36y50w_2rw0000gp/T/tmpvs5qvbxr/get-mobilenetv2-get-model/0/model
load_model_kwargs={'model_name': 'mobilenetv2', 'model_class': 'mobilenet_v2', 'modules_map': '/var/folders/rn/q8gs952n26982d36y50w_2rw0000gp/T/tmpvs5qvbxr/get-mobilenetv2-get-model/0/mobilenetv2_modules_map.json.json'}
onnx_model_name=onnx_mobilenetv2
optimize_model=False
framework_kwargs={'input_signature': [((32, 3, 224, 224), 'float32')]}
model=store://models/onnx-utils/onnx_mobilenetv2#0@95deb2c7dbf0460291efb25c48eeebd7^03e4286da44d015cf5465d43e809a504d15f7f63
\n", + "
\n", + "
\n", + "
\n", + " Title\n", + " ×\n", + "
\n", + " \n", + "
\n", + "
\n" + ] + }, + "metadata": {}, + "output_type": "display_data", + "jetTransient": { + "display_id": null + } + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ], + "text/html": [ + " > to track results use the .show() or .logs() methods or click here to open in UI" + ] + }, + "metadata": {}, + "output_type": "display_data", + "jetTransient": { + "display_id": null + } + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "> 2026-02-10 16:14:53,862 [info] Run execution finished: {\"name\":\"onnx-utils-to-onnx\",\"status\":\"completed\"}\n" + ] + } ], - "outputs": [], - "execution_count": null + "execution_count": 4 }, { "cell_type": "markdown", "metadata": {}, "source": [ - "1.2.5. Now, listing the artifact directory we will see both our `tf.keras` model and the `onnx` model:" + "1.2.5. Now we verify the ONNX model was created:" ] }, { @@ -262,16 +758,29 @@ "metadata": { "pycharm": { "name": "#%%\n" + }, + "ExecuteTime": { + "end_time": "2026-02-10T14:14:56.820411Z", + "start_time": "2026-02-10T14:14:56.817892Z" } }, "source": [ "import os\n", "\n", - "\n", - "print(os.listdir(ARTIFACT_PATH))" + "onnx_model_file = os.path.join(ARTIFACT_PATH, \"onnx-utils-to-onnx\", \"0\", \"model\", \"onnx_mobilenetv2.onnx\")\n", + "assert os.path.isfile(onnx_model_file), f\"ONNX model not found at {onnx_model_file}\"\n", + "print(f\"ONNX model created at: {onnx_model_file}\")" ], - "outputs": [], - "execution_count": null + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "ONNX model created at: /var/folders/rn/q8gs952n26982d36y50w_2rw0000gp/T/tmpvs5qvbxr/onnx-utils-to-onnx/0/model/onnx_mobilenetv2.onnx\n" + ] + } + ], + "execution_count": 5 }, { "cell_type": "markdown", @@ -308,28 +817,281 @@ "metadata": { "pycharm": { "name": "#%%\n" + }, + "ExecuteTime": { + "end_time": "2026-02-10T14:15:03.415997Z", + "start_time": "2026-02-10T14:15:00.637332Z" } }, - 
"source": [ - "onnx_utils_function.run(\n", - " handler=\"optimize\",\n", - " artifact_path=ARTIFACT_PATH,\n", - " params={\n", - " \"model_name\": ONNX_MODEL_NAME,\n", - " \"model_path\": to_onnx_run.output(ONNX_MODEL_NAME), # <- Take the logged model from the previous function.\n", - " \"optimized_model_name\": OPTIMIZED_ONNX_MODEL_NAME,\n", - " },\n", - " local=True\n", - ")" + "source": "# Construct the ONNX model path from the run directory structure:\nonnx_model_path = os.path.join(ARTIFACT_PATH, \"onnx-utils-to-onnx\", \"0\", \"model\")\n\nonnx_utils_function.run(\n handler=\"optimize\",\n output_path=ARTIFACT_PATH,\n params={\n \"model_path\": onnx_model_path,\n \"handler_init_kwargs\": {\"model_name\": ONNX_MODEL_NAME},\n \"optimized_model_name\": OPTIMIZED_ONNX_MODEL_NAME,\n },\n local=True\n)", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "> 2026-02-10 16:15:00,639 [info] Storing function: {\"db\":null,\"name\":\"onnx-utils-optimize\",\"uid\":\"0c30d7af94814dcabde8152a1951fb5d\"}\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ], + "text/html": [ + "\n", + "
\n", + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
projectuiditerstartendstatekindnamelabelsinputsparametersresultsartifact_uris
onnx-utils0Feb 10 14:15:01NaTcompletedrunonnx-utils-optimize
v3io_user=omerm
kind=local
owner=omerm
host=M-KCX16N69X3
model_path=/var/folders/rn/q8gs952n26982d36y50w_2rw0000gp/T/tmpvs5qvbxr/onnx-utils-to-onnx/0/model
handler_init_kwargs={'model_name': 'onnx_mobilenetv2'}
optimized_model_name=optimized_onnx_mobilenetv2
model=store://models/onnx-utils/optimized_onnx_mobilenetv2#0@0c30d7af94814dcabde8152a1951fb5d^599547984e83a664dc1c2708607d06731edb5ac2
\n", + "
\n", + "
\n", + "
\n", + " Title\n", + " ×\n", + "
\n", + " \n", + "
\n", + "
\n" + ] + }, + "metadata": {}, + "output_type": "display_data", + "jetTransient": { + "display_id": null + } + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ], + "text/html": [ + " > to track results use the .show() or .logs() methods or click here to open in UI" + ] + }, + "metadata": {}, + "output_type": "display_data", + "jetTransient": { + "display_id": null + } + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "> 2026-02-10 16:15:03,414 [info] Run execution finished: {\"name\":\"onnx-utils-optimize\",\"status\":\"completed\"}\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } ], - "outputs": [], - "execution_count": null + "execution_count": 6 }, { "cell_type": "markdown", "metadata": {}, "source": [ - "2.2.2. And now our model was optimized and can be seen under the `ARTIFACT_PATH`:" + "2.2.2. And now our model was optimized. 
Let us verify:" ] }, { @@ -337,13 +1099,27 @@ "metadata": { "pycharm": { "name": "#%%\n" + }, + "ExecuteTime": { + "end_time": "2026-02-10T14:15:05.748413Z", + "start_time": "2026-02-10T14:15:05.745309Z" } }, "source": [ - "print(os.listdir(ARTIFACT_PATH))" + "optimized_model_file = os.path.join(ARTIFACT_PATH, \"onnx-utils-optimize\", \"0\", \"model\", \"optimized_onnx_mobilenetv2.onnx\")\n", + "assert os.path.isfile(optimized_model_file), f\"Optimized ONNX model not found at {optimized_model_file}\"\n", + "print(f\"Optimized ONNX model created at: {optimized_model_file}\")" ], - "outputs": [], - "execution_count": null + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Optimized ONNX model created at: /var/folders/rn/q8gs952n26982d36y50w_2rw0000gp/T/tmpvs5qvbxr/onnx-utils-optimize/0/model/optimized_onnx_mobilenetv2.onnx\n" + ] + } + ], + "execution_count": 7 }, { "cell_type": "markdown", @@ -353,7 +1129,7 @@ } }, "source": [ - "Lastly, run this code to clean up the models:" + "Lastly, run this code to clean up all generated files and directories:" ] }, { @@ -361,23 +1137,22 @@ "metadata": { "pycharm": { "name": "#%%\n" + }, + "ExecuteTime": { + "end_time": "2026-02-10T14:00:28.409998Z", + "start_time": "2026-02-10T13:57:21.679146Z" } }, - "source": [ - "import shutil\n", - "\n", - "\n", - "shutil.rmtree(ARTIFACT_PATH)" - ], + "source": "import shutil\n\n# Clean up the temporary artifact directory:\nif os.path.exists(ARTIFACT_PATH):\n shutil.rmtree(ARTIFACT_PATH)", "outputs": [], "execution_count": null } ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "mlrun_functions", "language": "python", - "name": "python3" + "name": "mlrun_functions" }, "language_info": { "codemirror_mode": { diff --git a/functions/development/onnx_utils/latest/src/requirements.txt b/functions/development/onnx_utils/latest/src/requirements.txt index d3d7dfd6..912b3d7e 100644 --- 
a/functions/development/onnx_utils/latest/src/requirements.txt +++ b/functions/development/onnx_utils/latest/src/requirements.txt @@ -1,11 +1,10 @@ tqdm~=4.67.1 tensorflow~=2.19.0 tf_keras~=2.19.0 -torch~=2.6.0 -torchvision~=0.21.0 +torch~=2.8 +torchvision~=0.23.0 onnx~=1.17.0 onnxruntime~=1.19.2 onnxoptimizer~=0.3.13 onnxmltools~=1.13.0 -tf2onnx~=1.16.1 -plotly~=5.23 +plotly~=5.23 \ No newline at end of file diff --git a/functions/development/onnx_utils/latest/src/test_onnx_utils.py b/functions/development/onnx_utils/latest/src/test_onnx_utils.py index 2e01782f..59c6c2b3 100644 --- a/functions/development/onnx_utils/latest/src/test_onnx_utils.py +++ b/functions/development/onnx_utils/latest/src/test_onnx_utils.py @@ -17,6 +17,9 @@ import tempfile import mlrun +import pytest + +PROJECT_NAME = "onnx-utils" # Choose our model's name: MODEL_NAME = "model" @@ -27,41 +30,67 @@ # Choose our optimized ONNX version model's name: OPTIMIZED_ONNX_MODEL_NAME = f"optimized_{ONNX_MODEL_NAME}" +REQUIRED_ENV_VARS = [ + "MLRUN_DBPATH", + "MLRUN_ARTIFACT_PATH", + "V3IO_USERNAME", + "V3IO_ACCESS_KEY", +] -def _setup_environment() -> str: - """ - Setup the test environment, creating the artifacts path of the test. - :returns: The temporary directory created for the test artifacts path. +def _validate_environment_variables() -> bool: """ - artifact_path = tempfile.TemporaryDirectory().name - os.makedirs(artifact_path) - return artifact_path + Checks that all required Environment variables are set. + """ + environment_keys = os.environ.keys() + return all(key in environment_keys for key in REQUIRED_ENV_VARS) -def _cleanup_environment(artifact_path: str): +def _is_tf2onnx_available() -> bool: """ - Cleanup the test environment, deleting files and artifacts created during the test. - - :param artifact_path: The artifact path to delete. + Check if tf2onnx is installed (required for TensorFlow/Keras ONNX conversion). 
""" - # Clean the local directory: + try: + import tf2onnx + return True + except ImportError: + return False + + +@pytest.fixture(scope="session") +def onnx_project(): + """Create/get the MLRun project once per test session.""" + return mlrun.get_or_create_project(PROJECT_NAME, context="./") + + +@pytest.fixture(autouse=True) +def test_environment(onnx_project): + """Setup and cleanup test artifacts for each test.""" + artifact_path = tempfile.mkdtemp() + yield artifact_path + # Cleanup - only remove files/dirs from the directory containing this test file, + # never from an arbitrary CWD (which could be the project root). + test_dir = os.path.dirname(os.path.abspath(__file__)) for test_output in [ - *os.listdir(artifact_path), "schedules", "runs", "artifacts", "functions", + "model.pt", + "model.zip", + "model_modules_map.json", + "model_modules_map.json.json", + "onnx_model.onnx", + "optimized_onnx_model.onnx", ]: - test_output_path = os.path.abspath(f"./{test_output}") + test_output_path = os.path.join(test_dir, test_output) if os.path.exists(test_output_path): if os.path.isdir(test_output_path): shutil.rmtree(test_output_path) else: os.remove(test_output_path) - - # Clean the artifacts directory: - shutil.rmtree(artifact_path) + if os.path.exists(artifact_path): + shutil.rmtree(artifact_path) def _log_tf_keras_model(context: mlrun.MLClientCtx, model_name: str): @@ -114,42 +143,55 @@ def _log_pytorch_model(context: mlrun.MLClientCtx, model_name: str): model_handler.log() -def test_to_onnx_help(): +@pytest.mark.skipif( + condition=not _validate_environment_variables(), + reason="Project's environment variables are not set", +) +def test_to_onnx_help(test_environment): """ Test the 'to_onnx' handler, passing "help" in the 'framework_kwargs'. 
""" - # Setup the tests environment: - artifact_path = _setup_environment() + artifact_path = test_environment # Create the function: log_model_function = mlrun.code_to_function( filename="test_onnx_utils.py", name="log_model", + project=PROJECT_NAME, kind="job", image="mlrun/ml-models", ) # Run the function to log the model: - log_model_run = log_model_function.run( - handler="_log_tf_keras_model", - artifact_path=artifact_path, + log_model_function.run( + handler="_log_pytorch_model", + output_path=artifact_path, params={"model_name": MODEL_NAME}, local=True, ) + # Get artifact paths - construct from artifact_path and run structure + run_artifact_dir = os.path.join(artifact_path, "log-model--log-pytorch-model", "0") + model_path = os.path.join(run_artifact_dir, "model") + modules_map_path = os.path.join(run_artifact_dir, "model_modules_map.json.json") + # Import the ONNX Utils function: - onnx_function = mlrun.import_function("function.yaml") + onnx_function = mlrun.import_function("function.yaml", project=PROJECT_NAME) # Run the function, passing "help" in 'framework_kwargs' and see that no exception was raised: is_test_passed = True try: onnx_function.run( handler="to_onnx", - artifact_path=artifact_path, + output_path=artifact_path, params={ # Take the logged model from the previous function. 
- "model_path": log_model_run.status.artifacts[0]["spec"]["target_path"], - "load_model_kwargs": {"model_name": MODEL_NAME}, + "model_path": model_path, + "load_model_kwargs": { + "model_name": MODEL_NAME, + "model_class": "mobilenet_v2", + "modules_map": modules_map_path, + }, "framework_kwargs": "help", }, local=True, @@ -160,23 +202,28 @@ def test_to_onnx_help(): ) is_test_passed = False - # Cleanup the tests environment: - _cleanup_environment(artifact_path=artifact_path) - assert is_test_passed -def test_tf_keras_to_onnx(): +@pytest.mark.skipif( + condition=not _validate_environment_variables(), + reason="Project's environment variables are not set", +) +@pytest.mark.skipif( + condition=not _is_tf2onnx_available(), + reason="tf2onnx is not installed", +) +def test_tf_keras_to_onnx(test_environment): """ Test the 'to_onnx' handler, giving it a tf.keras model. """ - # Setup the tests environment: - artifact_path = _setup_environment() + artifact_path = test_environment # Create the function: log_model_function = mlrun.code_to_function( filename="test_onnx_utils.py", name="log_model", + project=PROJECT_NAME, kind="job", image="mlrun/ml-models", ) @@ -184,18 +231,18 @@ def test_tf_keras_to_onnx(): # Run the function to log the model: log_model_run = log_model_function.run( handler="_log_tf_keras_model", - artifact_path=artifact_path, + output_path=artifact_path, params={"model_name": MODEL_NAME}, local=True, ) # Import the ONNX Utils function: - onnx_function = mlrun.import_function("function.yaml") + onnx_function = mlrun.import_function("function.yaml", project=PROJECT_NAME) # Run the function to convert our model to ONNX: onnx_function_run = onnx_function.run( handler="to_onnx", - artifact_path=artifact_path, + output_path=artifact_path, params={ # Take the logged model from the previous function. 
"model_path": log_model_run.status.artifacts[0]["spec"]["target_path"], @@ -205,9 +252,6 @@ def test_tf_keras_to_onnx(): local=True, ) - # Cleanup the tests environment: - _cleanup_environment(artifact_path=artifact_path) - # Print the outputs list: print(f"Produced outputs: {onnx_function_run.outputs}") @@ -215,17 +259,21 @@ def test_tf_keras_to_onnx(): assert "model" in onnx_function_run.outputs -def test_pytorch_to_onnx(): +@pytest.mark.skipif( + condition=not _validate_environment_variables(), + reason="Project's environment variables are not set", +) +def test_pytorch_to_onnx(test_environment): """ Test the 'to_onnx' handler, giving it a pytorch model. """ - # Setup the tests environment: - artifact_path = _setup_environment() + artifact_path = test_environment # Create the function: log_model_function = mlrun.code_to_function( filename="test_onnx_utils.py", name="log_model", + project=PROJECT_NAME, kind="job", image="mlrun/ml-models", ) @@ -233,25 +281,30 @@ def test_pytorch_to_onnx(): # Run the function to log the model: log_model_run = log_model_function.run( handler="_log_pytorch_model", - artifact_path=artifact_path, + output_path=artifact_path, params={"model_name": MODEL_NAME}, local=True, ) # Import the ONNX Utils function: - onnx_function = mlrun.import_function("function.yaml") + onnx_function = mlrun.import_function("function.yaml", project=PROJECT_NAME) + + # Get artifact paths - construct from artifact_path and run structure + run_artifact_dir = os.path.join(artifact_path, "log-model--log-pytorch-model", "0") + model_path = os.path.join(run_artifact_dir, "model") + modules_map_path = os.path.join(run_artifact_dir, "model_modules_map.json.json") # Run the function to convert our model to ONNX: onnx_function_run = onnx_function.run( handler="to_onnx", - artifact_path=artifact_path, + output_path=artifact_path, params={ # Take the logged model from the previous function. 
- "model_path": log_model_run.status.artifacts[1]["spec"]["target_path"], + "model_path": model_path, "load_model_kwargs": { "model_name": MODEL_NAME, "model_class": "mobilenet_v2", - "modules_map": log_model_run.status.artifacts[0]["spec"]["target_path"], + "modules_map": modules_map_path, }, "onnx_model_name": ONNX_MODEL_NAME, "framework_kwargs": {"input_signature": [((32, 3, 224, 224), "float32")]}, @@ -259,9 +312,6 @@ def test_pytorch_to_onnx(): local=True, ) - # Cleanup the tests environment: - _cleanup_environment(artifact_path=artifact_path) - # Print the outputs list: print(f"Produced outputs: {onnx_function_run.outputs}") @@ -269,22 +319,25 @@ def test_pytorch_to_onnx(): assert "model" in onnx_function_run.outputs -def test_optimize_help(): +@pytest.mark.skipif( + condition=not _validate_environment_variables(), + reason="Project's environment variables are not set", +) +def test_optimize_help(test_environment): """ Test the 'optimize' handler, passing "help" in the 'optimizations'. 
""" - # Setup the tests environment: - artifact_path = _setup_environment() + artifact_path = test_environment # Import the ONNX Utils function: - onnx_function = mlrun.import_function("function.yaml") + onnx_function = mlrun.import_function("function.yaml", project=PROJECT_NAME) # Run the function, passing "help" in 'optimizations' and see that no exception was raised: is_test_passed = True try: onnx_function.run( handler="optimize", - artifact_path=artifact_path, + output_path=artifact_path, params={ "model_path": "", "optimizations": "help", @@ -297,69 +350,81 @@ def test_optimize_help(): ) is_test_passed = False - # Cleanup the tests environment: - _cleanup_environment(artifact_path=artifact_path) - assert is_test_passed -def test_optimize(): +@pytest.mark.skipif( + condition=not _validate_environment_variables(), + reason="Project's environment variables are not set", +) +def test_optimize(test_environment): """ - Test the 'optimize' handler, giving it a model from the ONNX zoo git repository. + Test the 'optimize' handler, giving it a pytorch model converted to ONNX. 
""" - # Setup the tests environment: - artifact_path = _setup_environment() + artifact_path = test_environment # Create the function: log_model_function = mlrun.code_to_function( filename="test_onnx_utils.py", name="log_model", + project=PROJECT_NAME, kind="job", image="mlrun/ml-models", ) # Run the function to log the model: - log_model_run = log_model_function.run( - handler="_log_tf_keras_model", - artifact_path=artifact_path, + log_model_function.run( + handler="_log_pytorch_model", + output_path=artifact_path, params={"model_name": MODEL_NAME}, local=True, ) + # Get artifact paths - construct from artifact_path and run structure + run_artifact_dir = os.path.join(artifact_path, "log-model--log-pytorch-model", "0") + model_path = os.path.join(run_artifact_dir, "model") + modules_map_path = os.path.join(run_artifact_dir, "model_modules_map.json.json") + # Import the ONNX Utils function: - onnx_function = mlrun.import_function("function.yaml") + onnx_function = mlrun.import_function("function.yaml", project=PROJECT_NAME) # Run the function to convert our model to ONNX: - to_onnx_function_run = onnx_function.run( + onnx_function.run( handler="to_onnx", - artifact_path=artifact_path, + output_path=artifact_path, params={ # Take the logged model from the previous function. 
- "model_path": log_model_run.status.artifacts[0]["spec"]["target_path"], - "load_model_kwargs": {"model_name": MODEL_NAME}, + "model_path": model_path, + "load_model_kwargs": { + "model_name": MODEL_NAME, + "model_class": "mobilenet_v2", + "modules_map": modules_map_path, + }, "onnx_model_name": ONNX_MODEL_NAME, + "framework_kwargs": {"input_signature": [((32, 3, 224, 224), "float32")]}, }, local=True, ) + # Get the ONNX model path from the to_onnx run output + onnx_run_artifact_dir = os.path.join( + artifact_path, "onnx-utils-to-onnx", "0" + ) + onnx_model_path = os.path.join(onnx_run_artifact_dir, "model") + # Run the function to optimize our model: optimize_function_run = onnx_function.run( handler="optimize", - artifact_path=artifact_path, + output_path=artifact_path, params={ # Take the logged model from the previous function. - "model_path": to_onnx_function_run.status.artifacts[0]["spec"][ - "target_path" - ], + "model_path": onnx_model_path, "handler_init_kwargs": {"model_name": ONNX_MODEL_NAME}, "optimized_model_name": OPTIMIZED_ONNX_MODEL_NAME, }, local=True, ) - # Cleanup the tests environment: - _cleanup_environment(artifact_path=artifact_path) - # Print the outputs list: print(f"Produced outputs: {optimize_function_run.outputs}") diff --git a/functions/development/onnx_utils/latest/static/example.html b/functions/development/onnx_utils/latest/static/example.html index 9a5ae765..add12b6c 100644 --- a/functions/development/onnx_utils/latest/static/example.html +++ b/functions/development/onnx_utils/latest/static/example.html @@ -251,17 +251,18 @@

Supported keyword arguments (

1.2. Demo#

-

We will use the TF.Keras framework, a MobileNetV2 as our model and we will convert it to ONNX using the to_onnx handler.

-

1.2.1. First we will set a temporary artifact path for our model to be saved in and choose the models names:

+

We will use the PyTorch framework, a MobileNetV2 as our model and we will convert it to ONNX using the to_onnx handler.

+

1.2.1. First we will set the artifact path for our model to be saved in and choose the models names:

import os
-os.environ["TF_USE_LEGACY_KERAS"] = "true"
-from tempfile import TemporaryDirectory
+import tempfile
+# Use a temporary directory for model artifacts (safe cleanup):
+ARTIFACT_PATH = tempfile.mkdtemp()
+os.environ["MLRUN_ARTIFACT_PATH"] = ARTIFACT_PATH
 
-# Create a temporary directory for the model artifact:
-ARTIFACT_PATH = TemporaryDirectory().name
-os.makedirs(ARTIFACT_PATH)
+# Project name:
+PROJECT_NAME = "onnx-utils"
 
 # Choose our model's name:
 MODEL_NAME = "mobilenetv2"
@@ -275,7 +276,7 @@ 

1.2. Demo#

-

1.2.2. Download the model from keras.applications and log it with MLRun’s TFKerasModelHandler:

+

1.2.2. Download the model from torchvision.models and log it with MLRun’s PyTorchModelHandler:

# mlrun: start-code
@@ -285,21 +286,23 @@ 

1.2. Demo#
-
from tensorflow import keras
+
-

1.2.3. Create the function using MLRun’s code_to_function and run it:

+
+
> 2026-02-10 16:14:24,932 [info] Created and saved project: {"context":"./","from_template":null,"name":"onnx-utils","overwrite":false,"save":true}
+> 2026-02-10 16:14:24,933 [info] Project created successfully: {"project_name":"onnx-utils","stored_in_db":true}
+> 2026-02-10 16:14:31,659 [info] Storing function: {"db":null,"name":"get-mobilenetv2-get-model","uid":"7b9d1b54375b44e191d73685a382c910"}
+
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
projectuiditerstartendstatekindnamelabelsinputsparametersresultsartifact_uris
onnx-utils0Feb 10 14:14:32NaTcompletedrunget-mobilenetv2-get-model
v3io_user=omerm
kind=local
owner=omerm
host=M-KCX16N69X3
model_name=mobilenetv2
mobilenetv2_modules_map.json=store://artifacts/onnx-utils/#0@7b9d1b54375b44e191d73685a382c910
model=store://models/onnx-utils/mobilenetv2#0@7b9d1b54375b44e191d73685a382c910^e0393bc5b070fd55cc57cecb94160ce412498e0f
+
+ +
+

+
+
+
> 2026-02-10 16:14:34,427 [info] Run execution finished: {"name":"get-mobilenetv2-get-model","status":"completed"}
+
+
+

1.2.4. Import the onnx_utils MLRun function and run it:

# Import the ONNX function from the marketplace:
-onnx_utils_function = mlrun.import_function("hub://onnx_utils")
+onnx_utils_function = mlrun.import_function("hub://onnx_utils", project=PROJECT_NAME)
+
+# Construct the model path from the run directory structure:
+model_path = os.path.join(ARTIFACT_PATH, "get-mobilenetv2-get-model", "0", "model")
+modules_map_path = os.path.join(ARTIFACT_PATH, "get-mobilenetv2-get-model", "0", "mobilenetv2_modules_map.json.json")
 
 # Run the function to convert our model to ONNX:
 to_onnx_run = onnx_utils_function.run(
     handler="to_onnx",
-    artifact_path=ARTIFACT_PATH,
+    output_path=ARTIFACT_PATH,
     params={
         "model_name": MODEL_NAME,
-        "model_path": get_model_run.outputs[MODEL_NAME],  # <- Take the logged model from the previous function.
+        "model_path": model_path,
+        "load_model_kwargs": {
+            "model_name": MODEL_NAME,
+            "model_class": "mobilenet_v2",
+            "modules_map": modules_map_path,
+        },
         "onnx_model_name": ONNX_MODEL_NAME,
-        "optimize_model": False  # <- For optimizing it later in the demo, we mark the flag as False
+        "optimize_model": False,  # <- For optimizing it later in the demo, we mark the flag as False
+        "framework_kwargs": {"input_signature": [((32, 3, 224, 224), "float32")]},
     },
     local=True
 )
 
+
+
> 2026-02-10 16:14:48,519 [info] Storing function: {"db":null,"name":"onnx-utils-to-onnx","uid":"95deb2c7dbf0460291efb25c48eeebd7"}
+
+
+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
projectuiditerstartendstatekindnamelabelsinputsparametersresultsartifact_uris
onnx-utils0Feb 10 14:14:49NaTcompletedrunonnx-utils-to-onnx
v3io_user=omerm
kind=local
owner=omerm
host=M-KCX16N69X3
model_name=mobilenetv2
model_path=/var/folders/rn/q8gs952n26982d36y50w_2rw0000gp/T/tmpvs5qvbxr/get-mobilenetv2-get-model/0/model
load_model_kwargs={'model_name': 'mobilenetv2', 'model_class': 'mobilenet_v2', 'modules_map': '/var/folders/rn/q8gs952n26982d36y50w_2rw0000gp/T/tmpvs5qvbxr/get-mobilenetv2-get-model/0/mobilenetv2_modules_map.json.json'}
onnx_model_name=onnx_mobilenetv2
optimize_model=False
framework_kwargs={'input_signature': [((32, 3, 224, 224), 'float32')]}
model=store://models/onnx-utils/onnx_mobilenetv2#0@95deb2c7dbf0460291efb25c48eeebd7^03e4286da44d015cf5465d43e809a504d15f7f63
+
+ +
+

+
+
+
> 2026-02-10 16:14:53,862 [info] Run execution finished: {"name":"onnx-utils-to-onnx","status":"completed"}
+
+
+
-

1.2.5. Now, listing the artifact directory we will see both our tf.keras model and the onnx model:

+

1.2.5. Now we verify the ONNX model was created:

import os
 
-
-print(os.listdir(ARTIFACT_PATH))
+onnx_model_file = os.path.join(ARTIFACT_PATH, "onnx-utils-to-onnx", "0", "model", "onnx_mobilenetv2.onnx")
+assert os.path.isfile(onnx_model_file), f"ONNX model not found at {onnx_model_file}"
+print(f"ONNX model created at: {onnx_model_file}")
+
+
+
+
+
ONNX model created at: /var/folders/rn/q8gs952n26982d36y50w_2rw0000gp/T/tmpvs5qvbxr/onnx-utils-to-onnx/0/model/onnx_mobilenetv2.onnx
 
@@ -399,12 +842,15 @@

2.2. Demo#2.2.1. We will call now the optimize handler:

-
onnx_utils_function.run(
+
# Construct the ONNX model path from the run directory structure:
+onnx_model_path = os.path.join(ARTIFACT_PATH, "onnx-utils-to-onnx", "0", "model")
+
+onnx_utils_function.run(
     handler="optimize",
-    artifact_path=ARTIFACT_PATH,
+    output_path=ARTIFACT_PATH,
     params={
-        "model_name": ONNX_MODEL_NAME,
-        "model_path": to_onnx_run.output(ONNX_MODEL_NAME),  # <- Take the logged model from the previous function.
+        "model_path": onnx_model_path,
+        "handler_init_kwargs": {"model_name": ONNX_MODEL_NAME},
         "optimized_model_name": OPTIMIZED_ONNX_MODEL_NAME,
     },
     local=True
@@ -412,22 +858,243 @@ 

2.2. Demo#

+
+
> 2026-02-10 16:15:00,639 [info] Storing function: {"db":null,"name":"onnx-utils-optimize","uid":"0c30d7af94814dcabde8152a1951fb5d"}
+
-

2.2.2. And now our model was optimized and can be seen under the ARTIFACT_PATH:

+
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
projectuiditerstartendstatekindnamelabelsinputsparametersresultsartifact_uris
onnx-utils0Feb 10 14:15:01NaTcompletedrunonnx-utils-optimize
v3io_user=omerm
kind=local
owner=omerm
host=M-KCX16N69X3
model_path=/var/folders/rn/q8gs952n26982d36y50w_2rw0000gp/T/tmpvs5qvbxr/onnx-utils-to-onnx/0/model
handler_init_kwargs={'model_name': 'onnx_mobilenetv2'}
optimized_model_name=optimized_onnx_mobilenetv2
model=store://models/onnx-utils/optimized_onnx_mobilenetv2#0@0c30d7af94814dcabde8152a1951fb5d^599547984e83a664dc1c2708607d06731edb5ac2
+
+ +
+

+
+
+
> to track results use the .show() or .logs() methods or click here to open in UI
> 2026-02-10 16:15:03,414 [info] Run execution finished: {"name":"onnx-utils-optimize","status":"completed"}
+
+
+
<mlrun.model.RunObject at 0x106148190>
+
+
+
+
+

2.2.2. And now our model was optimized. Let us verify:

-
print(os.listdir(ARTIFACT_PATH))
+
optimized_model_file = os.path.join(ARTIFACT_PATH, "onnx-utils-optimize", "0", "model", "optimized_onnx_mobilenetv2.onnx")
+assert os.path.isfile(optimized_model_file), f"Optimized ONNX model not found at {optimized_model_file}"
+print(f"Optimized ONNX model created at: {optimized_model_file}")
 
+
+
Optimized ONNX model created at: /var/folders/rn/q8gs952n26982d36y50w_2rw0000gp/T/tmpvs5qvbxr/onnx-utils-optimize/0/model/optimized_onnx_mobilenetv2.onnx
+
+
-

Lastly, run this code to clean up the models:

+
+

Lastly, run this code to clean up all generated files and directories:

import shutil
 
-
-shutil.rmtree(ARTIFACT_PATH)
+# Clean up the temporary artifact directory:
+if os.path.exists(ARTIFACT_PATH):
+    shutil.rmtree(ARTIFACT_PATH)
 
diff --git a/functions/development/onnx_utils/latest/static/function.html b/functions/development/onnx_utils/latest/static/function.html index 4180c4cd..d1b5444b 100644 --- a/functions/development/onnx_utils/latest/static/function.html +++ b/functions/development/onnx_utils/latest/static/function.html @@ -28,42 +28,16 @@
         
-kind: job
 metadata:
+  name: onnx-utils
+  tag: ''
   categories:
   - utilities
   - deep-learning
-  name: onnx-utils
-  tag: ''
-verbose: false
+kind: job
 spec:
-  build:
-    code_origin: ''
-    base_image: mlrun/mlrun
-    origin_filename: ''
-    functionSourceCode: # Copyright 2019 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Callable, Dict, List, Tuple

import mlrun


class _ToONNXConversions:
    """
    An ONNX conversion functions library class.
    """

    @staticmethod
    def tf_keras_to_onnx(
        model_handler,
        onnx_model_name: str = None,
        optimize_model: bool = True,
        input_signature: List[Tuple[Tuple[int], str]] = None,
    ):
        """
        Convert a TF.Keras model to an ONNX model and log it back to MLRun as a new model object.

        :param model_handler:   An initialized TFKerasModelHandler with a loaded model to convert to ONNX.
        :param onnx_model_name: The name to use to log the converted ONNX model. If not given, the given `model_name`
                                will be used with an additional suffix `_onnx`. Defaulted to None.
        :param optimize_model:  Whether or not to optimize the ONNX model using 'onnxoptimizer' before saving the model.
                                Defaulted to True.
        :param input_signature: A list of the input layers shape and data type properties. Expected to receive a list
                                where each element is an input layer tuple. An input layer tuple is a tuple of:
                                [0] = Layer's shape, a tuple of integers.
                                [1] = Layer's data type, a mlrun.data_types.ValueType string.
                                If None, the input signature will be tried to be read from the model artifact. Defaulted
                                to None.
        """
        # Import the framework and handler:
        import tensorflow as tf
        from mlrun.frameworks.tf_keras import TFKerasUtils

        # Check the given 'input_signature' parameter:
        if input_signature is None:
            # Read the inputs from the model:
            try:
                model_handler.read_inputs_from_model()
            except Exception as error:
                raise mlrun.errors.MLRunRuntimeError(
                    f"Please provide the 'input_signature' parameter. The function tried reading the input layers "
                    f"information automatically but failed with the following error: {error}"
                )
        else:
            # Parse the 'input_signature' parameter:
            input_signature = [
                tf.TensorSpec(
                    shape=shape,
                    dtype=TFKerasUtils.convert_value_type_to_tf_dtype(
                        value_type=value_type
                    ),
                )
                for (shape, value_type) in input_signature
            ]

        # Convert to ONNX:
        model_handler.to_onnx(
            model_name=onnx_model_name,
            input_signature=input_signature,
            optimize=optimize_model,
        )

    @staticmethod
    def pytorch_to_onnx(
        model_handler,
        onnx_model_name: str = None,
        optimize_model: bool = True,
        input_signature: List[Tuple[Tuple[int, ...], str]] = None,
        input_layers_names: List[str] = None,
        output_layers_names: List[str] = None,
        dynamic_axes: Dict[str, Dict[int, str]] = None,
        is_batched: bool = True,
    ):
        """
        Convert a PyTorch model to an ONNX model and log it back to MLRun as a new model object.

        :param model_handler:       An initialized PyTorchModelHandler with a loaded model to convert to ONNX.
        :param onnx_model_name:     The name to use to log the converted ONNX model. If not given, the given
                                    `model_name` will be used with an additional suffix `_onnx`. Defaulted to None.
        :param optimize_model:      Whether or not to optimize the ONNX model using 'onnxoptimizer' before saving the
                                    model. Defaulted to True.
        :param input_signature:     A list of the input layers shape and data type properties. Expected to receive a
                                    list where each element is an input layer tuple. An input layer tuple is a tuple of:
                                    [0] = Layer's shape, a tuple of integers.
                                    [1] = Layer's data type, a mlrun.data_types.ValueType string.
                                    If None, the input signature will be tried to be read from the model artifact.
                                    Defaulted to None.
        :param input_layers_names:  List of names to assign to the input nodes of the graph in order. All of the other
                                    parameters (inner layers) can be set as well by passing additional names in the
                                    list. The order is by the order of the parameters in the model. If None, the inputs
                                    will be read from the handler's inputs. If its also None, it is defaulted to:
                                    "input_0", "input_1", ...
        :param output_layers_names: List of names to assign to the output nodes of the graph in order. If None, the
                                    outputs will be read from the handler's outputs. If its also None, it is defaulted
                                    to: "output_0" (for multiple outputs, this parameter must be provided).
        :param dynamic_axes:        If part of the input / output shape is dynamic, like (batch_size, 3, 32, 32) you can
                                    specify it by giving a dynamic axis to the input / output layer by its name as
                                    follows: {
                                        "input layer name": {0: "batch_size"},
                                        "output layer name": {0: "batch_size"},
                                    }
                                    If provided, the 'is_batched' flag will be ignored. Defaulted to None.
        :param is_batched:          Whether to include a batch size as the first axis in every input and output layer.
                                    Defaulted to True. Will be ignored if 'dynamic_axes' is provided.
        """
        # Import the framework and handler:
        import torch
        from mlrun.frameworks.pytorch import PyTorchUtils

        # Parse the 'input_signature' parameter:
        if input_signature is not None:
            input_signature = tuple(
                [
                    torch.zeros(
                        size=shape,
                        dtype=PyTorchUtils.convert_value_type_to_torch_dtype(
                            value_type=value_type
                        ),
                    )
                    for (shape, value_type) in input_signature
                ]
            )

        # Convert to ONNX:
        model_handler.to_onnx(
            model_name=onnx_model_name,
            input_sample=input_signature,
            optimize=optimize_model,
            input_layers_names=input_layers_names,
            output_layers_names=output_layers_names,
            dynamic_axes=dynamic_axes,
            is_batched=is_batched,
        )


# Map for getting the conversion function according to the provided framework:
_CONVERSION_MAP = {
    "tensorflow.keras": _ToONNXConversions.tf_keras_to_onnx,
    "torch": _ToONNXConversions.pytorch_to_onnx,
}  # type: Dict[str, Callable]


def to_onnx(
    context: mlrun.MLClientCtx,
    model_path: str,
    load_model_kwargs: dict = None,
    onnx_model_name: str = None,
    optimize_model: bool = True,
    framework_kwargs: Dict[str, Any] = None,
):
    """
    Convert the given model to an ONNX model.

    :param context:           The MLRun function execution context
    :param model_path:        The model path store object.
    :param load_model_kwargs: Keyword arguments to pass to the `AutoMLRun.load_model` method.
    :param onnx_model_name:   The name to use to log the converted ONNX model. If not given, the given `model_name` will
                              be used with an additional suffix `_onnx`. Defaulted to None.
    :param optimize_model:    Whether to optimize the ONNX model using 'onnxoptimizer' before saving the model.
                              Defaulted to True.
    :param framework_kwargs:  Additional arguments each framework may require to convert to ONNX. To get the doc string
                              of the desired framework onnx conversion function, pass "help".
    """
    from mlrun.frameworks.auto_mlrun.auto_mlrun import AutoMLRun

    # Get a model handler of the required framework:
    load_model_kwargs = load_model_kwargs or {}
    model_handler = AutoMLRun.load_model(
        model_path=model_path, context=context, **load_model_kwargs
    )

    # Get the model's framework:
    framework = model_handler.FRAMEWORK_NAME

    # Use the conversion map to get the specific framework to onnx conversion:
    if framework not in _CONVERSION_MAP:
        raise mlrun.errors.MLRunInvalidArgumentError(
            f"The following framework: '{framework}', has no ONNX conversion."
        )
    conversion_function = _CONVERSION_MAP[framework]

    # Check if needed to print the function's doc string ("help" is passed):
    if framework_kwargs == "help":
        print(conversion_function.__doc__)
        return

    # Set the default empty framework kwargs if needed:
    if framework_kwargs is None:
        framework_kwargs = {}

    # Run the conversion:
    try:
        conversion_function(
            model_handler=model_handler,
            onnx_model_name=onnx_model_name,
            optimize_model=optimize_model,
            **framework_kwargs,
        )
    except TypeError as exception:
        raise mlrun.errors.MLRunInvalidArgumentError(
            f"ERROR: A TypeError exception was raised during the conversion:\n{exception}. "
            f"Please read the {framework} framework conversion function doc string by passing 'help' in the "
            f"'framework_kwargs' dictionary parameter."
        )


def optimize(
    context: mlrun.MLClientCtx,
    model_path: str,
    handler_init_kwargs: dict = None,
    optimizations: List[str] = None,
    fixed_point: bool = False,
    optimized_model_name: str = None,
):
    """
    Optimize the given ONNX model.

    :param context:              The MLRun function execution context.
    :param model_path:           Path to the ONNX model object.
    :param handler_init_kwargs:  Keyword arguments to pass to the `ONNXModelHandler` init method preloading.
    :param optimizations:        List of possible optimizations. To see what optimizations are available, pass "help".
                                 If None, all the optimizations will be used. Defaulted to None.
    :param fixed_point:          Optimize the weights using fixed point. Defaulted to False.
    :param optimized_model_name: The name of the optimized model. If None, the original model will be overridden.
                                 Defaulted to None.
    """
    # Import the model handler:
    import onnxoptimizer
    from mlrun.frameworks.onnx import ONNXModelHandler

    # Check if needed to print the available optimizations ("help" is passed):
    if optimizations == "help":
        available_passes = "\n* ".join(onnxoptimizer.get_available_passes())
        print(f"The available optimizations are:\n* {available_passes}")
        return

    # Create the model handler:
    handler_init_kwargs = handler_init_kwargs or {}
    model_handler = ONNXModelHandler(
        model_path=model_path, context=context, **handler_init_kwargs
    )

    # Load the ONNX model:
    model_handler.load()

    # Optimize the model using the given configurations:
    model_handler.optimize(optimizations=optimizations, fixed_point=fixed_point)

    # Rename if needed:
    if optimized_model_name is not None:
        model_handler.set_model_name(model_name=optimized_model_name)

    # Log the optimized model:
    model_handler.log()

-    requirements:
-    - tqdm~=4.67.1
-    - tensorflow~=2.19.0
-    - tf_keras~=2.19.0
-    - torch~=2.6.0
-    - torchvision~=0.21.0
-    - onnx~=1.17.0
-    - onnxruntime~=1.19.2
-    - onnxoptimizer~=0.3.13
-    - onnxmltools~=1.13.0
-    - tf2onnx~=1.16.1
-    - plotly~=5.23
-    with_mlrun: false
-    auto_build: true
-  disable_auto_mount: false
-  description: ONNX intigration in MLRun, some utils functions for the ONNX framework,
-    optimizing and converting models from different framework to ONNX using MLRun.
-  image: ''
   entry_points:
     tf_keras_to_onnx:
-      doc: Convert a TF.Keras model to an ONNX model and log it back to MLRun as a
-        new model object.
       name: tf_keras_to_onnx
       parameters:
       - name: model_handler
@@ -88,12 +62,12 @@
           data type, a mlrun.data_types.ValueType string. If None, the input signature
           will be tried to be read from the model artifact. Defaulted to None.'
         default: null
+      doc: Convert a TF.Keras model to an ONNX model and log it back to MLRun as a
+        new model object.
+      lineno: 26
       has_varargs: false
       has_kwargs: false
-      lineno: 26
     pytorch_to_onnx:
-      doc: Convert a PyTorch model to an ONNX model and log it back to MLRun as a
-        new model object.
       name: pytorch_to_onnx
       parameters:
       - name: model_handler
@@ -146,11 +120,12 @@
         doc: Whether to include a batch size as the first axis in every input and
           output layer. Defaulted to True. Will be ignored if 'dynamic_axes' is provided.
         default: true
+      doc: Convert a PyTorch model to an ONNX model and log it back to MLRun as a
+        new model object.
+      lineno: 81
       has_varargs: false
       has_kwargs: false
-      lineno: 81
     to_onnx:
-      doc: Convert the given model to an ONNX model.
       name: to_onnx
       parameters:
       - name: context
@@ -180,11 +155,11 @@
           get the doc string of the desired framework onnx conversion function, pass
           "help".
         default: null
+      doc: Convert the given model to an ONNX model.
+      lineno: 160
       has_varargs: false
       has_kwargs: false
-      lineno: 160
     optimize:
-      doc: Optimize the given ONNX model.
       name: optimize
       parameters:
       - name: context
@@ -211,12 +186,37 @@
         doc: The name of the optimized model. If None, the original model will be
           overridden. Defaulted to None.
         default: null
+      doc: Optimize the given ONNX model.
+      lineno: 224
       has_varargs: false
       has_kwargs: false
-      lineno: 224
+  image: ''
   default_handler: to_onnx
   allow_empty_resources: true
   command: ''
+  disable_auto_mount: false
+  description: ONNX intigration in MLRun, some utils functions for the ONNX framework,
+    optimizing and converting models from different framework to ONNX using MLRun.
+  build:
+    functionSourceCode: # Copyright 2019 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Callable, Dict, List, Tuple

import mlrun


class _ToONNXConversions:
    """
    An ONNX conversion functions library class.
    """

    @staticmethod
    def tf_keras_to_onnx(
        model_handler,
        onnx_model_name: str = None,
        optimize_model: bool = True,
        input_signature: List[Tuple[Tuple[int], str]] = None,
    ):
        """
        Convert a TF.Keras model to an ONNX model and log it back to MLRun as a new model object.

        :param model_handler:   An initialized TFKerasModelHandler with a loaded model to convert to ONNX.
        :param onnx_model_name: The name to use to log the converted ONNX model. If not given, the given `model_name`
                                will be used with an additional suffix `_onnx`. Defaulted to None.
        :param optimize_model:  Whether or not to optimize the ONNX model using 'onnxoptimizer' before saving the model.
                                Defaulted to True.
        :param input_signature: A list of the input layers shape and data type properties. Expected to receive a list
                                where each element is an input layer tuple. An input layer tuple is a tuple of:
                                [0] = Layer's shape, a tuple of integers.
                                [1] = Layer's data type, a mlrun.data_types.ValueType string.
                                If None, the input signature will be tried to be read from the model artifact. Defaulted
                                to None.
        """
        # Import the framework and handler:
        import tensorflow as tf
        from mlrun.frameworks.tf_keras import TFKerasUtils

        # Check the given 'input_signature' parameter:
        if input_signature is None:
            # Read the inputs from the model:
            try:
                model_handler.read_inputs_from_model()
            except Exception as error:
                raise mlrun.errors.MLRunRuntimeError(
                    f"Please provide the 'input_signature' parameter. The function tried reading the input layers "
                    f"information automatically but failed with the following error: {error}"
                )
        else:
            # Parse the 'input_signature' parameter:
            input_signature = [
                tf.TensorSpec(
                    shape=shape,
                    dtype=TFKerasUtils.convert_value_type_to_tf_dtype(
                        value_type=value_type
                    ),
                )
                for (shape, value_type) in input_signature
            ]

        # Convert to ONNX:
        model_handler.to_onnx(
            model_name=onnx_model_name,
            input_signature=input_signature,
            optimize=optimize_model,
        )

    @staticmethod
    def pytorch_to_onnx(
        model_handler,
        onnx_model_name: str = None,
        optimize_model: bool = True,
        input_signature: List[Tuple[Tuple[int, ...], str]] = None,
        input_layers_names: List[str] = None,
        output_layers_names: List[str] = None,
        dynamic_axes: Dict[str, Dict[int, str]] = None,
        is_batched: bool = True,
    ):
        """
        Convert a PyTorch model to an ONNX model and log it back to MLRun as a new model object.

        :param model_handler:       An initialized PyTorchModelHandler with a loaded model to convert to ONNX.
        :param onnx_model_name:     The name to use to log the converted ONNX model. If not given, the given
                                    `model_name` will be used with an additional suffix `_onnx`. Defaulted to None.
        :param optimize_model:      Whether or not to optimize the ONNX model using 'onnxoptimizer' before saving the
                                    model. Defaulted to True.
        :param input_signature:     A list of the input layers shape and data type properties. Expected to receive a
                                    list where each element is an input layer tuple. An input layer tuple is a tuple of:
                                    [0] = Layer's shape, a tuple of integers.
                                    [1] = Layer's data type, a mlrun.data_types.ValueType string.
                                    If None, the input signature will be tried to be read from the model artifact.
                                    Defaulted to None.
        :param input_layers_names:  List of names to assign to the input nodes of the graph in order. All of the other
                                    parameters (inner layers) can be set as well by passing additional names in the
                                    list. The order is by the order of the parameters in the model. If None, the inputs
                                    will be read from the handler's inputs. If its also None, it is defaulted to:
                                    "input_0", "input_1", ...
        :param output_layers_names: List of names to assign to the output nodes of the graph in order. If None, the
                                    outputs will be read from the handler's outputs. If its also None, it is defaulted
                                    to: "output_0" (for multiple outputs, this parameter must be provided).
        :param dynamic_axes:        If part of the input / output shape is dynamic, like (batch_size, 3, 32, 32) you can
                                    specify it by giving a dynamic axis to the input / output layer by its name as
                                    follows: {
                                        "input layer name": {0: "batch_size"},
                                        "output layer name": {0: "batch_size"},
                                    }
                                    If provided, the 'is_batched' flag will be ignored. Defaulted to None.
        :param is_batched:          Whether to include a batch size as the first axis in every input and output layer.
                                    Defaulted to True. Will be ignored if 'dynamic_axes' is provided.
        """
        # Import the framework and handler:
        import torch
        from mlrun.frameworks.pytorch import PyTorchUtils

        # Parse the 'input_signature' parameter:
        if input_signature is not None:
            input_signature = tuple(
                [
                    torch.zeros(
                        size=shape,
                        dtype=PyTorchUtils.convert_value_type_to_torch_dtype(
                            value_type=value_type
                        ),
                    )
                    for (shape, value_type) in input_signature
                ]
            )

        # Convert to ONNX:
        model_handler.to_onnx(
            model_name=onnx_model_name,
            input_sample=input_signature,
            optimize=optimize_model,
            input_layers_names=input_layers_names,
            output_layers_names=output_layers_names,
            dynamic_axes=dynamic_axes,
            is_batched=is_batched,
        )


# Map for getting the conversion function according to the provided framework:
_CONVERSION_MAP = {
    "tensorflow.keras": _ToONNXConversions.tf_keras_to_onnx,
    "torch": _ToONNXConversions.pytorch_to_onnx,
}  # type: Dict[str, Callable]


def to_onnx(
    context: mlrun.MLClientCtx,
    model_path: str,
    load_model_kwargs: dict = None,
    onnx_model_name: str = None,
    optimize_model: bool = True,
    framework_kwargs: Dict[str, Any] = None,
):
    """
    Convert the given model to an ONNX model.

    :param context:           The MLRun function execution context.
    :param model_path:        The model path store object.
    :param load_model_kwargs: Keyword arguments to pass to the `AutoMLRun.load_model` method.
    :param onnx_model_name:   The name to use to log the converted ONNX model. If not given, the given `model_name` will
                              be used with an additional suffix `_onnx`. Defaulted to None.
    :param optimize_model:    Whether to optimize the ONNX model using 'onnxoptimizer' before saving the model.
                              Defaulted to True.
    :param framework_kwargs:  Additional arguments each framework may require to convert to ONNX. To get the doc string
                              of the desired framework onnx conversion function, pass "help".

    :raises MLRunInvalidArgumentError: If the loaded model's framework has no registered ONNX conversion, or if the
                                       conversion function rejected the given `framework_kwargs`.
    """
    from mlrun.frameworks.auto_mlrun.auto_mlrun import AutoMLRun

    # Get a model handler of the required framework:
    load_model_kwargs = load_model_kwargs or {}
    model_handler = AutoMLRun.load_model(
        model_path=model_path, context=context, **load_model_kwargs
    )

    # Get the model's framework:
    framework = model_handler.FRAMEWORK_NAME

    # Use the conversion map to get the specific framework to onnx conversion:
    if framework not in _CONVERSION_MAP:
        raise mlrun.errors.MLRunInvalidArgumentError(
            f"The following framework: '{framework}', has no ONNX conversion."
        )
    conversion_function = _CONVERSION_MAP[framework]

    # Check if needed to print the function's doc string ("help" is passed):
    if framework_kwargs == "help":
        print(conversion_function.__doc__)
        return

    # Set the default empty framework kwargs if needed:
    if framework_kwargs is None:
        framework_kwargs = {}

    # Run the conversion:
    try:
        conversion_function(
            model_handler=model_handler,
            onnx_model_name=onnx_model_name,
            optimize_model=optimize_model,
            **framework_kwargs,
        )
    except TypeError as exception:
        # Chain the original TypeError (`from exception`, PEP 3134) so its
        # traceback — which names the offending keyword — is preserved:
        raise mlrun.errors.MLRunInvalidArgumentError(
            f"ERROR: A TypeError exception was raised during the conversion:\n{exception}. "
            f"Please read the {framework} framework conversion function doc string by passing 'help' in the "
            f"'framework_kwargs' dictionary parameter."
        ) from exception


def optimize(
    context: mlrun.MLClientCtx,
    model_path: str,
    handler_init_kwargs: dict = None,
    optimizations: List[str] = None,
    fixed_point: bool = False,
    optimized_model_name: str = None,
):
    """
    Optimize the given ONNX model.

    :param context:              The MLRun function execution context.
    :param model_path:           Path to the ONNX model object.
    :param handler_init_kwargs:  Keyword arguments to pass to the `ONNXModelHandler` init method preloading.
    :param optimizations:        List of possible optimizations. To see what optimizations are available, pass "help".
                                 If None, all the optimizations will be used. Defaulted to None.
    :param fixed_point:          Optimize the weights using fixed point. Defaulted to False.
    :param optimized_model_name: The name of the optimized model. If None, the original model will be overridden.
                                 Defaulted to None.
    """
    import onnxoptimizer
    from mlrun.frameworks.onnx import ONNXModelHandler

    # "help" short-circuits the run and only lists the available passes:
    if optimizations == "help":
        passes = "\n* ".join(onnxoptimizer.get_available_passes())
        print(f"The available optimizations are:\n* {passes}")
        return

    # Build the handler over the stored model and load the ONNX model itself:
    model_handler = ONNXModelHandler(
        model_path=model_path, context=context, **(handler_init_kwargs or {})
    )
    model_handler.load()

    # Apply the requested passes (or all of them when `optimizations` is None):
    model_handler.optimize(optimizations=optimizations, fixed_point=fixed_point)

    # Give the result its new name; without one, the original model is overridden:
    if optimized_model_name is not None:
        model_handler.set_model_name(model_name=optimized_model_name)

    # Log the optimized model back through the context:
    model_handler.log()

+    base_image: mlrun/mlrun
+    with_mlrun: false
+    auto_build: true
+    requirements:
+    - tqdm~=4.67.1
+    - tensorflow~=2.19.0
+    - tf_keras~=2.19.0
+    - torch~=2.8.0
+    - torchvision~=0.23.0
+    - onnx~=1.17.0
+    - onnxruntime~=1.19.2
+    - onnxoptimizer~=0.3.13
+    - onnxmltools~=1.13.0
+    - tf2onnx~=1.16.1
+    - plotly~=5.23
+    origin_filename: ''
+    code_origin: ''
+verbose: false
 
         
     
diff --git a/functions/development/onnx_utils/latest/static/item.html b/functions/development/onnx_utils/latest/static/item.html index fdd7dc51..b4662c63 100644 --- a/functions/development/onnx_utils/latest/static/item.html +++ b/functions/development/onnx_utils/latest/static/item.html @@ -43,7 +43,7 @@ author: Iguazio maintainers: [] marketplaceType: '' -mlrunVersion: 1.7.2 +mlrunVersion: 1.10.0 name: onnx_utils platformVersion: 3.5.0 spec: @@ -60,8 +60,8 @@ - tqdm~=4.67.1 - tensorflow~=2.19.0 - tf_keras~=2.19.0 - - torch~=2.6.0 - - torchvision~=0.21.0 + - torch~=2.8.0 + - torchvision~=0.23.0 - onnx~=1.17.0 - onnxruntime~=1.19.2 - onnxoptimizer~=0.3.13 @@ -69,7 +69,7 @@ - tf2onnx~=1.16.1 - plotly~=5.23 url: '' -version: 1.3.0 +version: 1.4.0

diff --git a/functions/development/tags.json b/functions/development/tags.json index ee43d671..62c21180 100644 --- a/functions/development/tags.json +++ b/functions/development/tags.json @@ -1 +1 @@ -{"kind": ["serving", "job", "nuclio:serving"], "categories": ["model-testing", "model-serving", "genai", "monitoring", "data-analysis", "deep-learning", "machine-learning", "utilities", "model-training", "data-preparation", "NLP", "data-generation", "audio"]} \ No newline at end of file +{"categories": ["data-generation", "machine-learning", "NLP", "model-testing", "audio", "monitoring", "data-preparation", "model-serving", "utilities", "model-training", "data-analysis", "deep-learning", "genai"], "kind": ["nuclio:serving", "job", "serving"]} \ No newline at end of file diff --git a/modules/development/tags.json b/modules/development/tags.json index 8d771240..d38353be 100644 --- a/modules/development/tags.json +++ b/modules/development/tags.json @@ -1 +1 @@ -{"kind": ["generic", "monitoring_application"], "categories": ["model-serving", "genai", "structured-ML"]} \ No newline at end of file +{"kind": ["monitoring_application", "generic"], "categories": ["model-serving", "structured-ML", "genai"]} \ No newline at end of file diff --git a/steps/development/tags.json b/steps/development/tags.json index 1ecf4a3b..ec9e8ba7 100644 --- a/steps/development/tags.json +++ b/steps/development/tags.json @@ -1 +1 @@ -{"kind": ["generic"], "categories": ["utilities", "model-serving", "data-preparation"]} \ No newline at end of file +{"kind": ["generic"], "categories": ["utilities", "data-preparation", "model-serving"]} \ No newline at end of file