Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions .ci/scripts/wheel/pre_build_script.sh
Original file line number Diff line number Diff line change
Expand Up @@ -62,3 +62,10 @@ if [[ "$(uname -s)" == "Linux" && "$(uname -m)" == "x86_64" ]]; then
echo "QNN_SDK_ROOT=${QNN_SDK_ROOT}" >> "${GITHUB_ENV}"
echo "QNN SDK downloaded to ${QNN_SDK_ROOT}"
fi

# Install OpenVINO Python package on Linux x86_64 for wheel testing.
# The backend itself has no build-time dependency (uses dlopen at runtime).
# Gated to x86_64: the linux-aarch64 wheel workflow reuses this pre-script but
# its smoke test only checks backend registration (no runtime load), so
# installing OpenVINO there adds CI time and risks platform-specific failures.
if [[ "$(uname -s)" == "Linux" && "$(uname -m)" == "x86_64" ]]; then
Comment on lines +66 to +68
Copy link

Copilot AI Mar 19, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This installs openvino on all Linux wheel builds, including the linux-aarch64 workflow that also uses this pre-script. Since the aarch64 smoke test only checks backend registration (no runtime load), consider gating this install to x86_64 (or making it best-effort/optional) to avoid unnecessary CI time and potential platform-specific install failures.

Suggested change
# Install OpenVINO Python package on Linux for wheel testing.
# The backend itself has no build-time dependency (uses dlopen at runtime).
if [[ "$(uname -s)" == "Linux" ]]; then
# Install OpenVINO Python package on Linux x86_64 for wheel testing.
# The backend itself has no build-time dependency (uses dlopen at runtime).
if [[ "$(uname -s)" == "Linux" && "$(uname -m)" == "x86_64" ]]; then

Copilot uses AI. Check for mistakes.
echo "Installing OpenVINO runtime for testing..."
pip install "openvino>=2025.1.0,<2026.0.0"
fi
5 changes: 5 additions & 0 deletions .ci/scripts/wheel/test_linux.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,11 @@
), f"QnnBackend not found in registered backends: {registered}"
print("✓ QnnBackend is registered")

# Wheel smoke test: the OpenVINO backend must be registered in the
# prebuilt pybindings module.
assert "OpenvinoBackend" in registered, (
    f"OpenvinoBackend not found in registered backends: {registered}"
)
print("✓ OpenvinoBackend is registered")

test_base.run_tests(
model_tests=[
test_base.ModelTest(
Expand Down
12 changes: 12 additions & 0 deletions .ci/scripts/wheel/test_linux_aarch64.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,18 @@
# coremltools does not support linux aarch64 yet and install from the source fails on runtime
# https://github.com/apple/coremltools/issues/1254
# https://github.com/apple/coremltools/issues/2195

from executorch.extension.pybindings.portable_lib import (
    _get_registered_backend_names,
)

# Wheel smoke test: confirm the OpenVINO backend was compiled into the
# prebuilt pybindings module. This checks registration only — it does not
# load the OpenVINO runtime.
registered = _get_registered_backend_names()
assert "OpenvinoBackend" in registered, (
    f"OpenvinoBackend not found in registered backends: {registered}"
)
print("✓ OpenvinoBackend is registered")

test_base.run_tests(
model_tests=[
test_base.ModelTest(
Expand Down
4 changes: 4 additions & 0 deletions README-wheel.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,10 @@ to run ExecuTorch `.pte` files, with some restrictions:
* Only the [XNNPACK backend delegate](docs/source/backends/xnnpack/xnnpack-overview.md) is linked into the prebuilt module.
* \[macOS only] [Core ML](docs/source/backends/coreml/coreml-overview.md) and [MPS](docs/source/backends/mps/mps-overview.md) backend
are also linked into the prebuilt module.
* \[Linux x86_64] [QNN](docs/source/backends-qualcomm.md) backend is linked into the prebuilt module.
* \[Linux] [OpenVINO](docs/source/build-run-openvino.md) backend is also linked into the
prebuilt module. Using it at runtime additionally requires installing the OpenVINO
runtime separately: `pip install executorch[openvino]`
Comment on lines +17 to +20
Copy link

Copilot AI Mar 19, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This section says OpenVINO is linked into the prebuilt module for "[Linux]", but earlier in the same doc the package compatibility is stated as "Linux x86_64". Please clarify the intended architecture support here (e.g., change to "[Linux x86_64]" or update the compatibility statement) to avoid confusing wheel users on other Linux architectures.

Copilot uses AI. Check for mistakes.

Please visit the [ExecuTorch website](https://pytorch.org/executorch) for
tutorials and documentation. Here are some starting points:
Expand Down
22 changes: 9 additions & 13 deletions backends/openvino/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -28,36 +28,32 @@ set(COMMON_INCLUDE_DIRS ${EXECUTORCH_ROOT}/..)
# Include utility CMake scripts from ExecuteTorch
include(${EXECUTORCH_ROOT}/tools/cmake/Utils.cmake)

# Find OpenVINO libraries
find_package(OpenVINO REQUIRED)
# The backend resolves OpenVINO C API symbols via dlopen/dlsym at runtime, so
# there is no build-time dependency on the OpenVINO SDK.

# Define OpenVINO backend as a static library
add_library(openvino_backend STATIC .)
add_library(openvino_backend STATIC)

# Enable exceptions and RTTI for OpenVINO backend
target_compile_options(openvino_backend PRIVATE -frtti -fexceptions)

# Include Executorch directories
target_include_directories(openvino_backend PRIVATE ${COMMON_INCLUDE_DIRS})

# Link OpenVINO and ExecuteTorch core libraries
target_link_libraries(
openvino_backend PRIVATE openvino::runtime executorch_core
)

# Add source files for OpenVINO backend
target_sources(
openvino_backend
PRIVATE ${CMAKE_CURRENT_LIST_DIR}/runtime/OpenvinoBackend.cpp
)

# Include Executorch directories
target_include_directories(openvino_backend PRIVATE ${COMMON_INCLUDE_DIRS})

# Link ExecuteTorch core and dynamic loading libraries
target_link_libraries(openvino_backend PRIVATE executorch_core ${CMAKE_DL_LIBS})

executorch_target_link_options_shared_lib(openvino_backend)

# Install OpenVINO backend library to the lib directory
install(
TARGETS openvino_backend
EXPORT ExecuTorchTargets
DESTINATION ${CMAKE_INSTALL_LIBDIR}
INCLUDES
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
)
31 changes: 31 additions & 0 deletions backends/openvino/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,36 @@ OpenVINO backend supports the following hardware:

For more information on the supported hardware, please refer to [OpenVINO System Requirements](https://docs.openvino.ai/2025/about-openvino/release-notes-openvino/system-requirements.html) page.

## Quick Start (pip wheel)

On Linux, the OpenVINO backend is included in the ExecuTorch pip wheel. Install the OpenVINO runtime to activate it:

```bash
pip install executorch[openvino]
```

Set the library path so the backend can find the OpenVINO runtime:

```bash
export LD_LIBRARY_PATH="$(python3 -c "import openvino, os; print(os.path.join(os.path.dirname(openvino.__file__), 'libs'))")${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"
```

Or point to the library directly:

```bash
export OPENVINO_LIB_PATH=$(python3 -c "import openvino, os; print(os.path.join(os.path.dirname(openvino.__file__), 'libs', 'libopenvino_c.so'))")
```

Verify the backend is available:

```python
from executorch.extension.pybindings.portable_lib import (
_get_registered_backend_names,
)
print(_get_registered_backend_names())
# Should include 'OpenvinoBackend'
```

## Directory Structure

```
Expand All @@ -24,6 +54,7 @@ executorch
│ ├── __init__.py
│ └── quantizer.py
│ ├── runtime
│ ├── OpenvinoApi.h
│ ├── OpenvinoBackend.cpp
│ └── OpenvinoBackend.h
│ ├── scripts
Expand Down
133 changes: 133 additions & 0 deletions backends/openvino/runtime/OpenvinoApi.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,133 @@
/*
* Copyright (c) Intel Corporation
*
* Licensed under the BSD License (the "License"); you may not use this file
* except in compliance with the License. See the license file found in the
* LICENSE file in the root directory of this source tree.
*/

#pragma once

#include <dlfcn.h>
#include <cstddef>
#include <cstdint>
#include <memory>

namespace executorch {
namespace backends {
namespace openvino {

// Forward declarations matching the OpenVINO C API opaque types.
// Only pointer types are used so struct layout is irrelevant.
typedef struct ov_core ov_core_t;
typedef struct ov_compiled_model ov_compiled_model_t;
typedef struct ov_infer_request ov_infer_request_t;
typedef struct ov_tensor ov_tensor_t;

// Value types reproduced from openvino/c/ov_shape.h and ov_common.h.
// These are stable C ABI — pinned via version constraint in pyproject.toml.
typedef struct {
int64_t rank;
int64_t* dims;
} ov_shape_t;

typedef struct {
char** devices;
size_t size;
} ov_available_devices_t;

// Intentionally partial — only OV_STATUS_OK is needed for success checks.
// The full enum is defined in openvino/c/ov_common.h.
typedef enum {
OV_STATUS_OK = 0,
OV_STATUS_GENERAL_ERROR = -1,
} ov_status_e;

// Values aligned with ov::element::Type_t (sequential enum).
typedef enum {
OV_ELEMENT_UNDEFINED = 0,
OV_ELEMENT_BOOLEAN = 1,
OV_ELEMENT_BF16 = 2,
OV_ELEMENT_F16 = 3,
OV_ELEMENT_F32 = 4,
OV_ELEMENT_F64 = 5,
OV_ELEMENT_I4 = 6,
OV_ELEMENT_I8 = 7,
OV_ELEMENT_I16 = 8,
OV_ELEMENT_I32 = 9,
OV_ELEMENT_I64 = 10,
OV_ELEMENT_U1 = 11,
OV_ELEMENT_U2 = 12,
OV_ELEMENT_U3 = 13,
OV_ELEMENT_U4 = 14,
OV_ELEMENT_U6 = 15,
OV_ELEMENT_U8 = 16,
} ov_element_type_e;

// Function pointer types for each OpenVINO C API function we use.
using ov_core_create_fn = ov_status_e (*)(ov_core_t**);
using ov_core_free_fn = void (*)(ov_core_t*);
using ov_core_get_available_devices_fn =
ov_status_e (*)(const ov_core_t*, ov_available_devices_t*);
using ov_available_devices_free_fn = void (*)(ov_available_devices_t*);
using ov_core_import_model_fn = ov_status_e (*)(
const ov_core_t*,
const char*,
size_t,
const char*,
ov_compiled_model_t**);
using ov_compiled_model_create_infer_request_fn =
ov_status_e (*)(const ov_compiled_model_t*, ov_infer_request_t**);
using ov_compiled_model_inputs_size_fn =
ov_status_e (*)(const ov_compiled_model_t*, size_t*);
using ov_compiled_model_outputs_size_fn =
ov_status_e (*)(const ov_compiled_model_t*, size_t*);
using ov_compiled_model_free_fn = void (*)(ov_compiled_model_t*);
using ov_infer_request_set_input_tensor_by_index_fn =
ov_status_e (*)(ov_infer_request_t*, size_t, const ov_tensor_t*);
using ov_infer_request_set_output_tensor_by_index_fn =
ov_status_e (*)(ov_infer_request_t*, size_t, const ov_tensor_t*);
using ov_infer_request_infer_fn = ov_status_e (*)(ov_infer_request_t*);
using ov_infer_request_free_fn = void (*)(ov_infer_request_t*);
using ov_tensor_create_from_host_ptr_fn =
ov_status_e (*)(ov_element_type_e, ov_shape_t, void*, ov_tensor_t**);
using ov_tensor_free_fn = void (*)(ov_tensor_t*);
using ov_shape_create_fn =
ov_status_e (*)(int64_t, const int64_t*, ov_shape_t*);
using ov_shape_free_fn = ov_status_e (*)(ov_shape_t*);

struct DlCloser {
void operator()(void* handle) {
if (handle) {
dlclose(handle);
}
}
};
using DlHandle = std::unique_ptr<void, DlCloser>;

struct OpenvinoFunctions {
ov_core_create_fn core_create = nullptr;
ov_core_free_fn core_free = nullptr;
ov_core_get_available_devices_fn core_get_available_devices = nullptr;
ov_available_devices_free_fn available_devices_free = nullptr;
ov_core_import_model_fn core_import_model = nullptr;
ov_compiled_model_create_infer_request_fn
compiled_model_create_infer_request = nullptr;
ov_compiled_model_inputs_size_fn compiled_model_inputs_size = nullptr;
ov_compiled_model_outputs_size_fn compiled_model_outputs_size = nullptr;
ov_compiled_model_free_fn compiled_model_free = nullptr;
ov_infer_request_set_input_tensor_by_index_fn
infer_request_set_input_tensor_by_index = nullptr;
ov_infer_request_set_output_tensor_by_index_fn
infer_request_set_output_tensor_by_index = nullptr;
ov_infer_request_infer_fn infer_request_infer = nullptr;
ov_infer_request_free_fn infer_request_free = nullptr;
ov_tensor_create_from_host_ptr_fn tensor_create_from_host_ptr = nullptr;
ov_tensor_free_fn tensor_free = nullptr;
ov_shape_create_fn shape_create = nullptr;
ov_shape_free_fn shape_free = nullptr;
};

} // namespace openvino
} // namespace backends
} // namespace executorch
Loading
Loading