diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..b0154c9 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,6 @@ +[submodule "jbpf"] + path = jbpf + url = https://github.com/microsoft/jbpf.git +[submodule "3p/nanopb"] + path = 3p/nanopb + url = https://github.com/nanopb/nanopb.git diff --git a/3p/nanopb b/3p/nanopb new file mode 160000 index 0000000..b36a089 --- /dev/null +++ b/3p/nanopb @@ -0,0 +1 @@ +Subproject commit b36a089ae41284bf4af5230c423750dfbadd649f diff --git a/README.md b/README.md index 5cd7cec..cf5327c 100644 --- a/README.md +++ b/README.md @@ -1,33 +1,57 @@ -# Project - -> This repo has been populated by an initial template to help get you started. Please -> make sure to update the content to build a great experience for community-building. - -As the maintainer of this project, please make a few updates: - -- Improving this README.MD file to provide a great experience -- Updating SUPPORT.MD with content about this project's support experience -- Understanding the security reporting process in SECURITY.MD -- Remove this section from the README - -## Contributing - -This project welcomes contributions and suggestions. Most contributions require you to agree to a -Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us -the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com. - -When you submit a pull request, a CLA bot will automatically determine whether you need to provide -a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions -provided by the bot. You will only need to do this once across all repos using our CLA. - -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
-For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or -contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. - -## Trademarks - -This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft -trademarks or logos is subject to and must follow -[Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). -Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. -Any use of third-party trademarks or logos are subject to those third-party's policies. +# jbpf-protobuf + +**NOTE: This project uses an experimental feature from jbpf. It is not meant to be used in production environments.** + +This repository is a extension for [jbpf](https://github.com/microsoft/jbpf/) demonstrating how to utilize protobuf serialization as part of jbpf. + +Prerequisites: +* C compiler +* Go v1.23.2+ +* Make +* Pip +* Python3 +* Protocol Buffer Compiler (protoc) + +The project utilizes [Nanopb](https://github.com/nanopb/nanopb) to generate C structures for given protobuf specs that use contiguous memory. It also generates serializer libraries that can be provided to jbpf, to encode output and decode input data to seamlessly integrate external data processing systems. + +# Getting started + +```sh +# init submodules: +./init_submodules.sh + +# Install nanopb pip packages: +python3 -m pip install -r 3p/nanopb/requirements.txt + +# source environment variables +source ./setup_jbpfp_env.sh + +# build jbpf_protobuf_cli +make -C pkg +``` + +Alternatively, build using a container: +```sh +# init submodules: +./init_submodules.sh + +docker build -t jbpf_protobuf_builder:latest -f deploy/Dockerfile . +``` + +## Running the examples + +In order to run any of the samples, you'll need to build jbpf. 
+ +```sh +mkdir -p jbpf/build +cd jbpf/build +cmake .. -DJBPF_EXPERIMENTAL_FEATURES=on +make -j +cd ../.. +``` + +Then follow [these](./examples/first_example_standalone/README.md) steps to run a simple example. + +# License + +The jbpf framework is licensed under the [MIT license](LICENSE.md). diff --git a/deploy/Dockerfile b/deploy/Dockerfile new file mode 100644 index 0000000..84b57d9 --- /dev/null +++ b/deploy/Dockerfile @@ -0,0 +1,26 @@ +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.23.2-1-azurelinux3.0 AS builder + +RUN tdnf upgrade tdnf --refresh -y && tdnf -y update +RUN tdnf install -y make python3-pip awk jq +RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b /root/go/bin v1.60.3 +ENV PATH="$PATH:/root/go/bin" + +COPY pkg /workspace/pkg +COPY 3p /workspace/3p +RUN python3 -m pip install -r /workspace/3p/nanopb/requirements.txt +COPY testdata /workspace/testdata +ENV NANO_PB=/workspace/3p/nanopb + +RUN make -C /workspace/pkg +RUN make -C /workspace/pkg test lint -j + +FROM mcr.microsoft.com/azurelinux/base/core:3.0 +RUN tdnf upgrade tdnf --refresh -y && tdnf -y update +RUN tdnf install -y build-essential make python3-pip + +COPY --from=builder /workspace/3p/nanopb /nanopb +RUN python3 -m pip install -r /nanopb/requirements.txt +COPY --from=builder /workspace/pkg/jbpf_protobuf_cli /usr/local/bin/jbpf_protobuf_cli +ENV NANO_PB=/nanopb + +ENTRYPOINT [ "jbpf_protobuf_cli" ] diff --git a/docs/design.md b/docs/design.md new file mode 100644 index 0000000..3fdef67 --- /dev/null +++ b/docs/design.md @@ -0,0 +1,94 @@ +# High level Architecture + +`jbpf_protobuf_cli` provides tooling to generate serialization assets for `jbpf` using protobuf. + +For complete details of each subcommand, see `./jbpf_protobuf_cli {SUBCOMMAND} --help`. 
+ +![architecture](./jbpf_arch.png) + +## Serde + +The `serde` subcommand generates assets from protobuf specs which can integrate with `jbpf`'s [serde functionality](https://github.com/microsoft/jbpf/blob/main/docs/serde.md). + +Developers must write `.proto` file(s) defining the models that are to be serialized. Additionally they must provide [generator options](https://jpa.kapsi.fi/nanopb/docs/reference.html#generator-options) as defined by nanopb to ensure generated structs can be defined in C as contiguous memory structs. + + +### Simple example + +This example goes through generating serde assets for a simple protobuf schema. + +``` +// schema.proto +syntax = "proto2"; + +message my_struct { + required int32 value = 1; + required string name = 2; +} + +// schema.options +my_struct.name max_size:32 +``` + +```sh +# To see all flags and options available, see +./jbpf_protobuf_cli serde --help + +# Generate the jbpf serde assets for the above proto spec +./jbpf_protobuf_cli serde -s schema:my_struct +``` + +This will generate the following files: +* `schema:my_struct_serializer.c`: + ```c + #define PB_FIELD_32BIT 1 + #include + #include + #include + #include "schema.pb.h" + + const uint32_t proto_message_size = sizeof(my_struct); + + int jbpf_io_serialize(void* input_msg_buf, size_t input_msg_buf_size, char* serialized_data_buf, size_t serialized_data_buf_size) { + if (input_msg_buf_size != proto_message_size) + return -1; + + pb_ostream_t ostream = pb_ostream_from_buffer((uint8_t*)serialized_data_buf, serialized_data_buf_size); + if (!pb_encode(&ostream, my_struct_fields, input_msg_buf)) + return -1; + + return ostream.bytes_written; + } + + int jbpf_io_deserialize(char* serialized_data_buf, size_t serialized_data_buf_size, void* output_msg_buf, size_t output_msg_buf_size) { + if (output_msg_buf_size != proto_message_size) + return 0; + + pb_istream_t istream = pb_istream_from_buffer((uint8_t*)serialized_data_buf, serialized_data_buf_size); + return 
pb_decode(&istream, my_struct_fields, output_msg_buf); + } + ``` +* `schema:my_struct_serializer.so` is the compiled shared object library of `schema:my_struct_serializer.c`. +* `schema.pb` is the complied protobuf spec. +* `schema.pb.c` is the generated nanopb constant definitions. +* `schema.pb.h` is the generated nanopb headers file. + +When loading the codelet description you can provide the generated `{schema}:{message_name}_serializer.so` as the io_channel `serde.file_path`. + +Additionally, you can provide the `{schema}.pb` to a decoder to be able to dynamically decode/encode the protobuf messages. + +To see detailed usage, run `jbpf_protobuf_cli serde --help`. + +## Decoder + +The cli tool also provides a `decoder` subcommand which can be run locally to receive and print protobuf messages sent over a UDP channel. The examples [example_collect_control](../examples/first_example_ipc/example_collect_control.cpp) and [first_example_standalone](../examples/first_example_standalone/example_app.cpp) bind to a UDP socket on port 20788 to send output data from jbpf which matches the default UDP socket for the decoder. + +This is useful for debugging output from jbpf and provide an example of how someone might dynamically decode output from jbpf by providing `.pb` schemas along with the associated stream identifier. + +To see detailed usage, run `jbpf_protobuf_cli decoder --help`. + +## Input Forwarder + +The tool also provides the ability to dynamically send protobuf input to jbpf from an external entity. It uses a TCP socket to send input channel messages to a jbpf instance. The examples [example_collect_control](../examples/first_example_ipc/example_collect_control.cpp) and [first_example_standalone](../examples/first_example_standalone/example_app.cpp) bind to a TCP socket on port 20787 to receive input data for jbpf which matches the default TCP socket for the input forwarder. + +To see detailed usage, run `jbpf_protobuf_cli input forward --help`. 
diff --git a/docs/jbpf_arch.png b/docs/jbpf_arch.png new file mode 100755 index 0000000..345f44e Binary files /dev/null and b/docs/jbpf_arch.png differ diff --git a/examples/first_example_ipc/.gitignore b/examples/first_example_ipc/.gitignore new file mode 100644 index 0000000..cf2fdbe --- /dev/null +++ b/examples/first_example_ipc/.gitignore @@ -0,0 +1,7 @@ +*.pb +*.pb.c +*.pb.h +*.so +example_app +example_codelet.o +example_collect_control diff --git a/examples/first_example_ipc/Makefile b/examples/first_example_ipc/Makefile new file mode 100644 index 0000000..ccaa66e --- /dev/null +++ b/examples/first_example_ipc/Makefile @@ -0,0 +1,41 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +ifeq ($(BUILD_TYPE),Debug) + DEBUG_CFLAGS = -g + DEBUG_LDFLAGS = -lgcov +else ifeq ($(BUILD_TYPE),AddressSanitizer) + DEBUG_CFLAGS = -fsanitize=address +endif + +AGENT_NAME := example_app +PRIMARY_NAME := example_collect_control +CODELET_NAME := example_codelet.o +INCLUDES := -I${JBPF_OUT_DIR}/inc -I${JBPF_PATH}/src/common -I${NANO_PB} -DJBPF_EXPERIMENTAL_FEATURES=on +AGENT_LDFLAGS := -L${JBPF_OUT_DIR}/lib -ljbpf -lck -lubpf -lmimalloc -lpthread -ldl -lrt ${DEBUG_LDFLAGS} +PRIMARY_LDFLAGS := -L${JBPF_OUT_DIR}/lib -ljbpf_io -lck -lmimalloc -lpthread -ldl -lrt ${DEBUG_LDFLAGS} +AGENT_FILE := example_app.cpp +PRIMARY_FILE := example_collect_control.cpp +CODELET_FILE := example_codelet.c +CODELET_CC := clang +JBPF_PROTOBUF_CLI := ${JBPFP_PATH}/pkg/jbpf_protobuf_cli + +CODELET_CFLAGS := -O2 -target bpf -Wall -DJBPF_DEBUG_ENABLED -D__x86_64__ + +.PHONY: all clean + +all: clean schema codelet agent primary + +codelet: ${CODELET_FILE} + ${CODELET_CC} ${CODELET_CFLAGS} ${INCLUDES} -c ${CODELET_FILE} -o ${CODELET_NAME} + +schema: + ${JBPF_PROTOBUF_CLI} serde -s schema:packet,manual_ctrl_event; \ + rm -f *_serializer.c + +agent: + g++ -std=c++17 $(INCLUDES) -o ${AGENT_NAME} $(AGENT_FILE) ${DEBUG_CFLAGS} ${AGENT_LDFLAGS} + +primary: + g++ -std=c++17 $(INCLUDES) -o 
${PRIMARY_NAME} $(PRIMARY_FILE) ${DEBUG_CFLAGS} ${PRIMARY_LDFLAGS} + +clean: + rm -f ${AGENT_NAME} ${PRIMARY_NAME} ${CODELET_NAME} *.pb.h *.pb.c *.pb *.so diff --git a/examples/first_example_ipc/README.md b/examples/first_example_ipc/README.md new file mode 100644 index 0000000..2e243e1 --- /dev/null +++ b/examples/first_example_ipc/README.md @@ -0,0 +1,114 @@ +# Basic example of standalone *jbpf* operation + +This example showcases a basic *jbpf-protobuf* usage scenario, when using in IPC mode. It provides a C++ application (`example_collect_control`) + that initializes *jbpf* in IPC primary mode, a dummy C++ application (`example_app`), that initializes +*jbpf* in IPC secondary mode, and an example codelet (`example_codelet.o`). +The example demonstrates the following: +1. How to declare and call hooks in the *jbpf* secondary process. +2. How to collect data sent by the codelet from the *jbpf* primary process. +3. How to forward data sent by the codelet onwards to a local decoder using a UDP socket. +4. How to receive data sent by the decoder using a TCP socket onwards to the primary process. +5. How to load and unload codeletsets using the LCM CLI tool (via a Unix socket API). + +For more details of the exact behavior of the application and the codelet, you can check the inline comments in [example_collect_control.cpp](./example_collect_control.cpp), +[example_app.cpp](./example_app.cpp) and [example_codelet.c](./example_codelet.c) + +## Usage + +This example expects *jbpf* to be built (see [README.md](../../README.md)). + +To build the example from scratch, we run the following commands: +```sh +$ source ../../setup_jbpfp_env.sh +$ make +``` + +This should produce these artifacts: +* `example_collect_control` +* `example_app` +* `example_codelet.o` +* `schema:manual_ctrl_event_serializer.so` - serializer library for `manual_ctrl_event` protobuf struct. +* `schema:packet_serializer.so` - serializer library for `packet` protobuf struct. 
+* `schema.pb` - compiled protobuf of [schema.proto](./schema.proto). +* `schema.pb.c` - nanopb generated C file. +* `schema.pb.h` - nanopb generated H file. + +To bring the primary application up, we run the following commands: +```sh +$ source ../../setup_jbpfp_env.sh +$ ./run_collect_control.sh +``` + +To start the local decoder: +```sh +$ source ../../setup_jbpfp_env.sh +$ ./run_decoder.sh +``` + +If successful, we should see the following line printed: +``` +[JBPF_INFO]: Allocated size is 1107296256 +``` + +To bring the primary application up, we run the following commands on a second terminal: +```sh +$ source ../../setup_jbpfp_env.sh +$ ./run_app.sh +``` + +If successful, we should see the following printed in the log of the secondary: +``` +[JBPF_INFO]: Agent thread initialization finished +[JBPF_INFO]: Setting the name of thread 1035986496 to jbpf_lcm_ipc +[JBPF_INFO]: Registered thread id 1 +[JBPF_INFO]: Started LCM IPC thread at /var/run/jbpf/jbpf_lcm_ipc +[JBPF_DEBUG]: jbpf_lcm_ipc thread ready +[JBPF_INFO]: Registered thread id 2 +[JBPF_INFO]: Started LCM IPC server +``` + +and on the primary: +``` +[JBPF_INFO]: Negotiation was successful +[JBPF_INFO]: Allocation worked for size 1073741824 +[JBPF_INFO]: Allocated size is 1073741824 +[JBPF_INFO]: Heap was created successfully +``` + +To load the codeletset, we run the following commands on a third terminal window: +```sh +$ source ../../setup_jbpfp_env.sh +$ ./load.sh +``` + +If the codeletset was loaded successfully, we should see the following output in the `example_app` window: +``` +[JBPF_INFO]: VM created and loaded successfully: example_codelet +``` + +After that, the primary `example_collect_control` should start printing periodical messages (once per second): +``` +INFO[0008] {"seqNo":5, "value":-5, "name":"instance 5"} streamUUID=00112233-4455-6677-8899-aabbccddeeff +INFO[0009] {"seqNo":6, "value":-6, "name":"instance 6"} streamUUID=00112233-4455-6677-8899-aabbccddeeff +INFO[0010] {"seqNo":7, 
"value":-7, "name":"instance 7"} streamUUID=00112233-4455-6677-8899-aabbccddeeff +``` + +To send a manual control message to the `example_app`, we run the command: +```sh +$ ./send_input_msg.sh 101 +``` + +This should trigger a message in the `example_app`: +``` +[JBPF_DEBUG]: Called 2 times so far and received manual_ctrl_event with value 101 +``` + +To unload the codeletset, we run the command: +```sh +$ ./unload.sh +``` + +The `example_app` should stop printing the periodical messages and should give the following output: +``` +[JBPF_INFO]: VM with vmfd 0 (i = 0) destroyed successfully +``` \ No newline at end of file diff --git a/examples/first_example_ipc/codeletset_load_request.yaml b/examples/first_example_ipc/codeletset_load_request.yaml new file mode 100644 index 0000000..7976255 --- /dev/null +++ b/examples/first_example_ipc/codeletset_load_request.yaml @@ -0,0 +1,21 @@ +codelet_descriptor: + - codelet_name: example_codelet + codelet_path: ${JBPFP_PATH}/examples/first_example_ipc/example_codelet.o + hook_name: example + in_io_channel: + - name: inmap + stream_id: "11111111111111111111111111111111" + serde: + file_path: ${JBPFP_PATH}/examples/first_example_ipc/schema:manual_ctrl_event_serializer.so + protobuf: + package_path: ${JBPFP_PATH}/examples/first_example_ipc/schema.pb + msg_name: manual_ctrl_event + out_io_channel: + - name: outmap + stream_id: 00112233445566778899AABBCCDDEEFF + serde: + file_path: ${JBPFP_PATH}/examples/first_example_ipc/schema:packet_serializer.so + protobuf: + package_path: ${JBPFP_PATH}/examples/first_example_ipc/schema.pb + msg_name: packet +codeletset_id: example_codeletset diff --git a/examples/first_example_ipc/codeletset_unload_request.yaml b/examples/first_example_ipc/codeletset_unload_request.yaml new file mode 100644 index 0000000..f24189a --- /dev/null +++ b/examples/first_example_ipc/codeletset_unload_request.yaml @@ -0,0 +1 @@ +codeletset_id: example_codeletset diff --git a/examples/first_example_ipc/example_app.cpp 
b/examples/first_example_ipc/example_app.cpp new file mode 100644 index 0000000..a27db3c --- /dev/null +++ b/examples/first_example_ipc/example_app.cpp @@ -0,0 +1,107 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +#include +#include +#include +#include +#include + +#include "schema.pb.h" + +#include "jbpf.h" +#include "jbpf_hook.h" +#include "jbpf_defs.h" + +using namespace std; + +#define SHM_NAME "example_ipc_app" + +// Hook declaration and definition. +DECLARE_JBPF_HOOK( + example, + struct jbpf_generic_ctx ctx, + ctx, + HOOK_PROTO(packet *p, int ctx_id), + HOOK_ASSIGN(ctx.ctx_id = ctx_id; ctx.data = (uint64_t)(void *)p; ctx.data_end = (uint64_t)(void *)(p + 1);)) + +DEFINE_JBPF_HOOK(example) + +bool done = false; + +void sig_handler(int signo) +{ + done = true; +} + +int handle_signal() +{ + if (signal(SIGINT, sig_handler) == SIG_ERR) + { + return 0; + } + if (signal(SIGTERM, sig_handler) == SIG_ERR) + { + return 0; + } + return -1; +} + +int main(int argc, char **argv) +{ + + struct jbpf_config jbpf_config = {0}; + jbpf_set_default_config_options(&jbpf_config); + + // Instruct libjbpf to use an external IPC interface + jbpf_config.io_config.io_type = JBPF_IO_IPC_CONFIG; + // Configure memory size for the IO buffer + jbpf_config.io_config.io_ipc_config.ipc_mem_size = JBPF_HUGEPAGE_SIZE_1GB; + strncpy(jbpf_config.io_config.io_ipc_config.ipc_name, SHM_NAME, JBPF_IO_IPC_MAX_NAMELEN); + + // Enable LCM IPC interface using UNIX socket at the default socket path (the default is through C API) + jbpf_config.lcm_ipc_config.has_lcm_ipc_thread = true; + snprintf( + jbpf_config.lcm_ipc_config.lcm_ipc_name, + sizeof(jbpf_config.lcm_ipc_config.lcm_ipc_name) - 1, + "%s", + JBPF_DEFAULT_LCM_SOCKET); + + if (!handle_signal()) + { + std::cout << "Could not register signal handler" << std::endl; + return -1; + } + + // Initialize jbpf + if (jbpf_init(&jbpf_config) < 0) + { + return -1; + } + + // Any thread that calls a hook must be registered + 
jbpf_register_thread(); + + int i = 0; + + // Sample application code calling a hook every second + while (!done) + { + packet p; + p.seq_no = i; + p.value = -i; + + std::stringstream ss; + ss << "instance " << i; + + std::strcpy(p.name, ss.str().c_str()); + + // Call hook and pass packet + hook_example(&p, 1); + sleep(1); + i++; + } + + jbpf_stop(); + exit(EXIT_SUCCESS); + + return 0; +} diff --git a/examples/first_example_ipc/example_codelet.c b/examples/first_example_ipc/example_codelet.c new file mode 100644 index 0000000..c609fa2 --- /dev/null +++ b/examples/first_example_ipc/example_codelet.c @@ -0,0 +1,75 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. + +#include + +#include "jbpf_defs.h" +#include "jbpf_helper.h" +#include "schema.pb.h" + +// Output map of type JBPF_MAP_TYPE_RINGBUF. +// The map is used to send out data of type packet. +// It holds a ringbuffer with a total of 3 elements. +jbpf_ringbuf_map(outmap, packet, 3); + +// Input map of type JBPF_MAP_TYPE_CONTROL_INPUT. +// The map is used to receive data of type manual_ctrl_event. +// It uses a ringbuffer, that can store a total of 3 elements. +jbpf_control_input_map(inmap, manual_ctrl_event, 3); + +// A map of type JBPF_MAP_TYPE_ARRAY, which is used +// to store internal codelet state. 
+struct jbpf_load_map_def SEC("maps") counter = { + .type = JBPF_MAP_TYPE_ARRAY, + .key_size = sizeof(int), + .value_size = sizeof(int), + .max_entries = 1, +}; + +SEC("jbpf_generic") +uint64_t +jbpf_main(void *state) +{ + + void *c; + int cnt; + struct jbpf_generic_ctx *ctx; + packet *p, *p_end; + packet echo; + manual_ctrl_event resp = {0}; + uint64_t index = 0; + + ctx = state; + + c = jbpf_map_lookup_elem(&counter, &index); + if (!c) + return 1; + + cnt = *(int *)c; + cnt++; + *(uint32_t *)c = cnt; + + p = (packet *)ctx->data; + p_end = (packet *)ctx->data_end; + + if (p + 1 > p_end) + return 1; + + echo = *p; + + // Copy the data that was passed to the codelet to the outmap ringbuffer + // and send them out. + if (jbpf_ringbuf_output(&outmap, &echo, sizeof(echo)) < 0) + { + return 1; + } + + if (jbpf_control_input_receive(&inmap, &resp, sizeof(resp)) == 1) + { + // Print a debug message. This helper function should NOT be used in production environments, due to + // its performance overhead. The helper function will be ignored, if *jbpf* has been built with the + // USE_JBPF_PRINTF_HELPER option set to OFF. 
+ jbpf_printf_debug(" Called %d times so far and received manual_ctrl_event with value %d\n\n", cnt, resp.value); + } + + return 0; +} diff --git a/examples/first_example_ipc/example_collect_control.cpp b/examples/first_example_ipc/example_collect_control.cpp new file mode 100644 index 0000000..06a8422 --- /dev/null +++ b/examples/first_example_ipc/example_collect_control.cpp @@ -0,0 +1,223 @@ +#include +#include + +#include "jbpf.h" +#include "jbpf_defs.h" +#include "jbpf_io.h" +#include "jbpf_io_channel.h" +#include + +#include "schema.pb.h" + +#define SHM_NAME "example_ipc_app" + +#define MAX_SERIALIZED_SIZE 1024 + +int sockfd; +struct sockaddr_in servaddr; +static volatile int done = 0; + +static void +handle_channel_bufs( + struct jbpf_io_channel *io_channel, struct jbpf_io_stream_id *stream_id, void **bufs, int num_bufs, void *ctx) +{ + struct jbpf_io_ctx *io_ctx = static_cast(ctx); + char serialized[MAX_SERIALIZED_SIZE]; + int serialized_size; + + if (stream_id && num_bufs > 0) + { + // Fetch the data and send to local decoder + for (auto i = 0; i < num_bufs; i++) + { + serialized_size = jbpf_io_channel_pack_msg(io_ctx, bufs[i], serialized, sizeof(serialized)); + if (serialized_size > 0) + { + sendto(sockfd, serialized, serialized_size, + MSG_CONFIRM, (const struct sockaddr *)&servaddr, + sizeof(servaddr)); + std::cout << "Message sent, size: " << serialized_size << std::endl; + } + else + { + std::cerr << "Failed to serialize message. 
Got return code: " << serialized_size << std::endl; + } + } + } +} + +void *fwd_socket_to_channel_in(void *arg) +{ + struct jbpf_io_ctx *io_ctx = static_cast(arg); + + jbpf_io_register_thread(); + + int sockfd, connfd; + socklen_t len; + struct sockaddr_in servaddr, cli; + + // socket create and verification + sockfd = socket(AF_INET, SOCK_STREAM, 0); + if (sockfd == -1) + { + printf("socket creation failed...\n"); + exit(0); + } + else + printf("Socket successfully created..\n"); + bzero(&servaddr, sizeof(servaddr)); + + servaddr.sin_family = AF_INET; + servaddr.sin_addr.s_addr = htonl(INADDR_ANY); + servaddr.sin_port = htons(20787); + + if ((bind(sockfd, (struct sockaddr *)&servaddr, sizeof(servaddr))) != 0) + { + printf("socket bind failed...\n"); + exit(0); + } + else + printf("Socket successfully binded..\n"); + + if ((listen(sockfd, 5)) != 0) + { + printf("Listen failed...\n"); + exit(0); + } + else + printf("Server listening..\n"); + len = sizeof(cli); + + for (;;) + { + connfd = accept(sockfd, (struct sockaddr *)&cli, &len); + if (connfd < 0) + { + printf("server accept failed...\n"); + exit(0); + } + else + printf("server accept the client...\n"); + + char buff[MAX_SERIALIZED_SIZE]; + int n; + struct jbpf_io_stream_id stream_id = {0}; + + for (;;) + { + auto n_diff = read(connfd, &buff[n], sizeof(buff) - n); + n += n_diff; + if (n_diff == 0) + { + printf("Client disconnected\n"); + break; + } + else if (n >= 18) + { + uint16_t payload_size = buff[1] * 256 + buff[0]; + if (n < payload_size + 2) + { + continue; + } + else if (n > payload_size + 2) + { + std::cerr << "Unexpected number of bytes in buffer, expected: " << payload_size << ", got: " << n - 2 << std::endl; + break; + } + + jbpf_channel_buf_ptr deserialized = jbpf_io_channel_unpack_msg(io_ctx, &buff[2], payload_size, &stream_id); + if (deserialized == NULL) + { + std::cerr << "Failed to deserialize message. 
Got NULL" << std::endl; + } + else + { + auto io_channel = jbpf_io_find_channel(io_ctx, stream_id, false); + if (io_channel) + { + auto ret = jbpf_io_channel_submit_buf(io_channel); + if (ret != 0) + { + std::cerr << "Failed to send message to channel. Got return code: " << ret << std::endl; + } + else + { + std::cout << "Dispatched msg of size: " << payload_size << std::endl; + } + } + else + { + std::cerr << "Failed to find io channel. Got NULL" << std::endl; + } + } + bzero(buff, MAX_SERIALIZED_SIZE); + n = 0; + } + } + } + + close(sockfd); + + // exit the current thread + pthread_exit(NULL); +} + +void handle_ctrl_c(int signum) +{ + printf("\nCaught Ctrl+C! Exiting gracefully...\n"); + done = 1; +} + +int main(int argc, char **argv) +{ + signal(SIGINT, handle_ctrl_c); + + // Creating socket file descriptor + if ((sockfd = socket(AF_INET, SOCK_DGRAM, 0)) < 0) + { + perror("socket creation failed"); + exit(EXIT_FAILURE); + } + + memset(&servaddr, 0, sizeof(servaddr)); + + // Filling server information + servaddr.sin_family = AF_INET; + servaddr.sin_port = htons(20788); + servaddr.sin_addr.s_addr = INADDR_ANY; + + struct jbpf_io_config io_config = {0}; + struct jbpf_io_ctx *io_ctx; + + // Designate the data collection framework as a primary for the IPC + io_config.type = JBPF_IO_IPC_PRIMARY; + + strncpy(io_config.ipc_config.addr.jbpf_io_ipc_name, SHM_NAME, JBPF_IO_IPC_MAX_NAMELEN); + + // Configure memory size for the IO buffer + io_config.ipc_config.mem_cfg.memory_size = JBPF_HUGEPAGE_SIZE_1GB; + + // Configure the jbpf agent to operate in shared memory mode + io_ctx = jbpf_io_init(&io_config); + + if (!io_ctx) + { + return -1; + } + + pthread_t ptid; + pthread_create(&ptid, NULL, &fwd_socket_to_channel_in, io_ctx); + + // Every thread that sends or receives jbpf data needs to be registered using this call + jbpf_io_register_thread(); + + while (!done) + { + // Continuously poll IPC output buffers + jbpf_io_channel_handle_out_bufs(io_ctx, handle_channel_bufs, 
io_ctx); + sleep(1); + } + + pthread_cancel(ptid); + return 0; +} diff --git a/examples/first_example_ipc/load.sh b/examples/first_example_ipc/load.sh new file mode 100755 index 0000000..0269ff0 --- /dev/null +++ b/examples/first_example_ipc/load.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +set -e + +$JBPFP_PATH/pkg/jbpf_protobuf_cli decoder load -c codeletset_load_request.yaml + +$JBPF_PATH/out/bin/jbpf_lcm_cli -l -c codeletset_load_request.yaml diff --git a/examples/first_example_ipc/run_app.sh b/examples/first_example_ipc/run_app.sh new file mode 100755 index 0000000..6b1b44e --- /dev/null +++ b/examples/first_example_ipc/run_app.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$JBPF_PATH/out/lib ./example_app diff --git a/examples/first_example_ipc/run_collect_control.sh b/examples/first_example_ipc/run_collect_control.sh new file mode 100755 index 0000000..45970ed --- /dev/null +++ b/examples/first_example_ipc/run_collect_control.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$JBPF_PATH/out/lib ./example_collect_control diff --git a/examples/first_example_ipc/run_decoder.sh b/examples/first_example_ipc/run_decoder.sh new file mode 100755 index 0000000..0458541 --- /dev/null +++ b/examples/first_example_ipc/run_decoder.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +$JBPFP_PATH/pkg/jbpf_protobuf_cli decoder run diff --git a/examples/first_example_ipc/schema.options b/examples/first_example_ipc/schema.options new file mode 100644 index 0000000..618bb99 --- /dev/null +++ b/examples/first_example_ipc/schema.options @@ -0,0 +1 @@ +packet.name max_size:32 diff --git a/examples/first_example_ipc/schema.proto b/examples/first_example_ipc/schema.proto new file mode 100644 index 0000000..b3db3d3 --- /dev/null +++ b/examples/first_example_ipc/schema.proto @@ -0,0 +1,9 @@ +syntax = "proto2"; + +message packet { + required int32 seq_no = 1; + required int32 value = 2; + required string name = 3; +} + +message manual_ctrl_event { required int32 value = 1; } diff 
--git a/examples/first_example_ipc/send_input_msg.sh b/examples/first_example_ipc/send_input_msg.sh new file mode 100755 index 0000000..b00c4df --- /dev/null +++ b/examples/first_example_ipc/send_input_msg.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +$JBPFP_PATH/pkg/jbpf_protobuf_cli input forward \ + -c codeletset_load_request.yaml \ + --stream-id 11111111-1111-1111-1111-111111111111 \ + --inline-json "{\"value\": $1}" diff --git a/examples/first_example_ipc/unload.sh b/examples/first_example_ipc/unload.sh new file mode 100755 index 0000000..1b5e2b7 --- /dev/null +++ b/examples/first_example_ipc/unload.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +$JBPF_PATH/out/bin/jbpf_lcm_cli -u -c codeletset_unload_request.yaml + +$JBPFP_PATH/pkg/jbpf_protobuf_cli decoder unload -c codeletset_load_request.yaml diff --git a/examples/first_example_standalone/.gitignore b/examples/first_example_standalone/.gitignore new file mode 100644 index 0000000..6b7a62a --- /dev/null +++ b/examples/first_example_standalone/.gitignore @@ -0,0 +1,6 @@ +*.pb +*.pb.c +*.pb.h +*.so +example_app +example_codelet.o diff --git a/examples/first_example_standalone/Makefile b/examples/first_example_standalone/Makefile new file mode 100644 index 0000000..e54640b --- /dev/null +++ b/examples/first_example_standalone/Makefile @@ -0,0 +1,36 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. 
+ifeq ($(BUILD_TYPE),Debug) + DEBUG_CFLAGS = -g + DEBUG_LDFLAGS = -lgcov +else ifeq ($(BUILD_TYPE),AddressSanitizer) + DEBUG_CFLAGS = -fsanitize=address +endif + +AGENT_NAME := example_app +CODELET_NAME := example_codelet.o +INCLUDES := -I${JBPF_OUT_DIR}/inc -I${NANO_PB} -DJBPF_EXPERIMENTAL_FEATURES=on +LDFLAGS := -L${JBPF_OUT_DIR}/lib -ljbpf -lck -lubpf -lmimalloc -lpthread -ldl -lrt ${DEBUG_LDFLAGS} +AGENT_FILE := example_app.cpp +CODELET_FILE := example_codelet.c +CODELET_CC := clang +JBPF_PROTOBUF_CLI := ${JBPFP_PATH}/pkg/jbpf_protobuf_cli + +CODELET_CFLAGS := -O2 -target bpf -Wall -DJBPF_DEBUG_ENABLED -D__x86_64__ + +.PHONY: all clean + +all: clean schema codelet agent + +codelet: ${CODELET_FILE} + ${CODELET_CC} ${CODELET_CFLAGS} ${INCLUDES} -c ${CODELET_FILE} -o ${CODELET_NAME} + +schema: + ${JBPF_PROTOBUF_CLI} serde -s schema:packet,manual_ctrl_event; \ + rm -f *_serializer.c + +agent: + g++ -std=c++17 $(INCLUDES) -o ${AGENT_NAME} $(AGENT_FILE) ${DEBUG_CFLAGS} ${LDFLAGS} + +clean: + rm -f ${AGENT_NAME} ${CODELET_NAME} *.pb.h *.pb.c *.pb *.so + diff --git a/examples/first_example_standalone/README.md b/examples/first_example_standalone/README.md new file mode 100644 index 0000000..a72011f --- /dev/null +++ b/examples/first_example_standalone/README.md @@ -0,0 +1,88 @@ +# Basic example of standalone *jbpf* operation + +This example showcases a basic *jbpf* usage scenario. It provides a dummy C++ application (`example_app`), that initializes +*jbpf* in standalone mode, and an example codelet (`example_codelet.o`). +The example demonstrates the following: +1. How to declare and call hooks. +2. How to register handler functions for capturing output data from codelets in standalone mode. +3. How to load and unload codeletsets using the LCM CLI tool (via a Unix socket API). +4. How to send data back to running codelets. + +For more details of the exact behavior of the application and the codelet, check [here](../../docs/understand_first_codelet.md). 
+You can also check the inline comments in [example_app.cpp](./example_app.cpp) +and [example_codelet.c](./example_codelet.c) + + +## Usage + +This example expects *jbpf* to be built (see [README.md](../../README.md)). + +To build the example from scratch, we run the following commands: +```sh +$ source ../../setup_jbpfp_env.sh +$ make +``` + +This should produce these artifacts: +* `example_app` +* `example_codelet.o` +* `schema:manual_ctrl_event_serializer.so` - serializer library for `manual_ctrl_event` protobuf struct. +* `schema:packet_serializer.so` - serializer library for `packet` protobuf struct. +* `schema.pb` - compiled protobuf of [schema.proto](./schema.proto). +* `schema.pb.c` - nanopb generated C file. +* `schema.pb.h` - nanopb generated H file. + +To bring up the application, we run the following commands: +```sh +$ source ../../setup_jbpfp_env.sh +$ ./run_app.sh +``` + +To start the local decoder: +```sh +$ source ../../setup_jbpfp_env.sh +$ ./run_decoder.sh +``` + +If successful, we shoud see the following line printed: +``` +[JBPF_INFO]: Started LCM IPC server +``` + +To load the codeletset, we run the following commands on a second terminal window: +```sh +$ source ../../setup_jbpfp_env.sh +$ ./load.sh +``` + +If the codeletset was loaded successfully, we should see the following output in the `example_app` window: +``` +[JBPF_INFO]: VM created and loaded successfully: example_codelet +``` + +After that, the agent should start printing periodical messages (once per second): +``` +INFO[0008] {"seqNo":5, "value":-5, "name":"instance 5"} streamUUID=00112233-4455-6677-8899-aabbccddeeff +INFO[0009] {"seqNo":6, "value":-6, "name":"instance 6"} streamUUID=00112233-4455-6677-8899-aabbccddeeff +INFO[0010] {"seqNo":7, "value":-7, "name":"instance 7"} streamUUID=00112233-4455-6677-8899-aabbccddeeff +``` + +To send a manual control message to the `example_app`, we run the command: +```sh +$ ./send_input_msg.sh 101 +``` + +This should trigger a message in 
the `example_app`: +``` +[JBPF_DEBUG]: Called 2 times so far and received manual_ctrl_event with value 101 +``` + +To unload the codeletset, we run the command: +```sh +$ ./unload.sh +``` + +The `example_app` should stop printing the periodical messages and should give the following output: +``` +[JBPF_INFO]: VM with vmfd 0 (i = 0) destroyed successfully +``` \ No newline at end of file diff --git a/examples/first_example_standalone/codeletset_load_request.yaml b/examples/first_example_standalone/codeletset_load_request.yaml new file mode 100644 index 0000000..617bbdd --- /dev/null +++ b/examples/first_example_standalone/codeletset_load_request.yaml @@ -0,0 +1,21 @@ +codelet_descriptor: + - codelet_name: example_codelet + codelet_path: ${JBPFP_PATH}/examples/first_example_standalone/example_codelet.o + hook_name: example + in_io_channel: + - name: inmap + stream_id: "11111111111111111111111111111111" + serde: + file_path: ${JBPFP_PATH}/examples/first_example_standalone/schema:manual_ctrl_event_serializer.so + protobuf: + package_path: ${JBPFP_PATH}/examples/first_example_standalone/schema.pb + msg_name: manual_ctrl_event + out_io_channel: + - name: outmap + stream_id: 00112233445566778899AABBCCDDEEFF + serde: + file_path: ${JBPFP_PATH}/examples/first_example_standalone/schema:packet_serializer.so + protobuf: + package_path: ${JBPFP_PATH}/examples/first_example_standalone/schema.pb + msg_name: packet +codeletset_id: example_codeletset diff --git a/examples/first_example_standalone/codeletset_unload_request.yaml b/examples/first_example_standalone/codeletset_unload_request.yaml new file mode 100644 index 0000000..f24189a --- /dev/null +++ b/examples/first_example_standalone/codeletset_unload_request.yaml @@ -0,0 +1 @@ +codeletset_id: example_codeletset diff --git a/examples/first_example_standalone/example_app.cpp b/examples/first_example_standalone/example_app.cpp new file mode 100644 index 0000000..3793044 --- /dev/null +++ 
b/examples/first_example_standalone/example_app.cpp @@ -0,0 +1,275 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +#define BOOST_BIND_GLOBAL_PLACEHOLDERS + +#include +#include +#include +#include +#include +#include + +#include "schema.pb.h" + +#include "jbpf.h" +#include "jbpf_hook.h" +#include "jbpf_defs.h" + +using namespace std; + +#define MAX_SERIALIZED_SIZE 1024 + +int sockfd; +struct sockaddr_in servaddr; + +// Hook declaration and definition. +DECLARE_JBPF_HOOK( + example, + struct jbpf_generic_ctx ctx, + ctx, + HOOK_PROTO(packet *p, int ctx_id), + HOOK_ASSIGN(ctx.ctx_id = ctx_id; ctx.data = (uint64_t)(void *)p; ctx.data_end = (uint64_t)(void *)(p + 1);)) + +DEFINE_JBPF_HOOK(example) + +// Handler function that is invoked every time that jbpf receives one or more buffers of data from a codelet +static void +io_channel_forward_output(jbpf_io_stream_id_t *stream_id, void **bufs, int num_bufs, void *ctx) +{ + auto io_ctx = jbpf_get_io_ctx(); + if (io_ctx == NULL) + { + std::cerr << "Failed to get IO context. Got NULL" << std::endl; + return; + } + + char serialized[MAX_SERIALIZED_SIZE]; + int serialized_size; + + if (stream_id && num_bufs > 0) + { + // Fetch the data and print in JSON format + for (auto i = 0; i < num_bufs; i++) + { + serialized_size = jbpf_io_channel_pack_msg(io_ctx, bufs[i], serialized, sizeof(serialized)); + if (serialized_size > 0) + { + sendto(sockfd, serialized, serialized_size, + MSG_CONFIRM, (const struct sockaddr *)&servaddr, + sizeof(servaddr)); + std::cout << "Message sent, size: " << serialized_size << std::endl; + } + else + { + std::cerr << "Failed to serialize message. 
Got return code: " << serialized_size << std::endl; + } + } + } +} + +bool done = false; + +void sig_handler(int signo) +{ + done = true; +} + +int handle_signal() +{ + if (signal(SIGINT, sig_handler) == SIG_ERR) + { + return 0; + } + if (signal(SIGTERM, sig_handler) == SIG_ERR) + { + return 0; + } + return -1; +} + +void *fwd_socket_to_channel_in(void *arg) +{ + jbpf_register_thread(); + + auto io_ctx = jbpf_get_io_ctx(); + + if (io_ctx == NULL) + { + std::cerr << "Failed to get IO context. Got NULL" << std::endl; + exit(0); + } + + int sockfd, connfd; + socklen_t len; + struct sockaddr_in servaddr, cli; + // socket create and verification + sockfd = socket(AF_INET, SOCK_STREAM, 0); + if (sockfd == -1) + { + printf("socket creation failed...\n"); + exit(0); + } + else + printf("Socket successfully created..\n"); + bzero(&servaddr, sizeof(servaddr)); + servaddr.sin_family = AF_INET; + servaddr.sin_addr.s_addr = htonl(INADDR_ANY); + servaddr.sin_port = htons(20787); + if ((bind(sockfd, (struct sockaddr *)&servaddr, sizeof(servaddr))) != 0) + { + printf("socket bind failed...\n"); + exit(0); + } + else + printf("Socket successfully binded..\n"); + if ((listen(sockfd, 5)) != 0) + { + printf("Listen failed...\n"); + exit(0); + } + else + printf("Server listening..\n"); + len = sizeof(cli); + for (;;) + { + connfd = accept(sockfd, (struct sockaddr *)&cli, &len); + if (connfd < 0) + { + printf("server accept failed...\n"); + exit(0); + } + else + printf("server accept the client...\n"); + char buff[MAX_SERIALIZED_SIZE]; + int n; + struct jbpf_io_stream_id stream_id = {0}; + for (;;) + { + auto n_diff = read(connfd, &buff[n], sizeof(buff) - n); + n += n_diff; + if (n_diff == 0) + { + printf("Client disconnected\n"); + break; + } + else if (n >= 18) + { + uint16_t payload_size = buff[1] * 256 + buff[0]; + if (n < payload_size + 2) + { + continue; + } + else if (n > payload_size + 2) + { + std::cerr << "Unexpected number of bytes in buffer, expected: " << payload_size << ", 
got: " << n - 2 << std::endl; + break; + } + + jbpf_channel_buf_ptr deserialized = jbpf_io_channel_unpack_msg(io_ctx, &buff[2], payload_size, &stream_id); + if (deserialized == NULL) + { + std::cerr << "Failed to deserialize message. Got NULL" << std::endl; + } + else + { + auto io_channel = jbpf_io_find_channel(io_ctx, stream_id, false); + if (io_channel) + { + auto ret = jbpf_io_channel_submit_buf(io_channel); + if (ret != 0) + { + std::cerr << "Failed to send message to channel. Got return code: " << ret << std::endl; + } + else + { + std::cout << "Dispatched msg of size: " << payload_size << std::endl; + } + } + else + { + std::cerr << "Failed to find io channel. Got NULL" << std::endl; + } + } + bzero(buff, MAX_SERIALIZED_SIZE); + n = 0; + } + } + } + close(sockfd); + // exit the current thread + pthread_exit(NULL); +} + +int main(int argc, char **argv) +{ + // Creating socket file descriptor + if ((sockfd = socket(AF_INET, SOCK_DGRAM, 0)) < 0) + { + perror("socket creation failed"); + exit(EXIT_FAILURE); + } + + memset(&servaddr, 0, sizeof(servaddr)); + + // Filling server information + servaddr.sin_family = AF_INET; + servaddr.sin_port = htons(20788); + servaddr.sin_addr.s_addr = INADDR_ANY; + + struct jbpf_config jbpf_config = {0}; + jbpf_set_default_config_options(&jbpf_config); + + // Enable LCM IPC interface using UNIX socket at the default socket path (the default is through C API) + jbpf_config.lcm_ipc_config.has_lcm_ipc_thread = true; + snprintf( + jbpf_config.lcm_ipc_config.lcm_ipc_name, + sizeof(jbpf_config.lcm_ipc_config.lcm_ipc_name) - 1, + "%s", + JBPF_DEFAULT_LCM_SOCKET); + + if (!handle_signal()) + { + std::cout << "Could not register signal handler" << std::endl; + return -1; + } + + // Initialize jbpf + if (jbpf_init(&jbpf_config) < 0) + { + return -1; + } + + pthread_t ptid; + pthread_create(&ptid, NULL, &fwd_socket_to_channel_in, NULL); + + // Any thread that calls a hook must be registered + jbpf_register_thread(); + + // Register the 
callback to handle output messages from codelets + jbpf_register_io_output_cb(io_channel_forward_output); + + int i = 0; + + // Sample application code calling a hook every second + while (!done) + { + packet p; + p.seq_no = i; + p.value = -i; + + std::stringstream ss; + ss << "instance " << i; + + std::strcpy(p.name, ss.str().c_str()); + + // Call hook and pass packet + hook_example(&p, 1); + sleep(1); + i++; + } + + jbpf_stop(); + pthread_cancel(ptid); + exit(EXIT_SUCCESS); + + return 0; +} diff --git a/examples/first_example_standalone/example_codelet.c b/examples/first_example_standalone/example_codelet.c new file mode 100644 index 0000000..cfb04f0 --- /dev/null +++ b/examples/first_example_standalone/example_codelet.c @@ -0,0 +1,75 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. + +#include + +#include "jbpf_defs.h" +#include "jbpf_helper.h" +#include "schema.pb.h" + +// Output map of type JBPF_MAP_TYPE_RINGBUF. +// The map is used to send out data of type packet. +// It holds a ringbuffer with a total of 3 elements. +jbpf_ringbuf_map(outmap, packet, 3); + +// Input map of type JBPF_MAP_TYPE_CONTROL_INPUT. +// The map is used to receive data of type manual_ctrl_event. +// It uses a ringbuffer, that can store a total of 3 elements. +jbpf_control_input_map(inmap, manual_ctrl_event, 3); + +// A map of type JBPF_MAP_TYPE_ARRAY, which is used +// to store internal codelet state. 
+struct jbpf_load_map_def SEC("maps") counter = { + .type = JBPF_MAP_TYPE_ARRAY, + .key_size = sizeof(int), + .value_size = sizeof(int), + .max_entries = 1, +}; + +SEC("jbpf_generic") +uint64_t +jbpf_main(void *state) +{ + + void *c; + int cnt; + struct jbpf_generic_ctx *ctx; + packet *p, *p_end; + packet echo; + manual_ctrl_event resp = {0}; + uint64_t index = 0; + + ctx = state; + + c = jbpf_map_lookup_elem(&counter, &index); + if (!c) + return 1; + + cnt = *(int *)c; + cnt++; + *(uint32_t *)c = cnt; + + p = (packet *)ctx->data; + p_end = (packet *)ctx->data_end; + + if (p + 1 > p_end) + return 1; + + echo = *p; + + // Copy the data that was passed to the codelet to the outmap ringbuffer + // and send them out. + if (jbpf_ringbuf_output(&outmap, &echo, sizeof(echo)) < 0) + { + return 1; + } + + if (jbpf_control_input_receive(&inmap, &resp, sizeof(resp)) == 1) + { + // Print a debug message. This helper function should NOT be used in production environemtns, due to + // its performance overhead. The helper function will be ignored, if *jbpf* has been built with the + // USE_JBPF_PRINTF_HELPER option set to OFF. 
+ jbpf_printf_debug(" Called %d times so far and received manual_ctrl_event with value %d\n\n", cnt, resp.value); + } + + return 0; +} diff --git a/examples/first_example_standalone/load.sh b/examples/first_example_standalone/load.sh new file mode 100755 index 0000000..0269ff0 --- /dev/null +++ b/examples/first_example_standalone/load.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +set -e + +$JBPFP_PATH/pkg/jbpf_protobuf_cli decoder load -c codeletset_load_request.yaml + +$JBPF_PATH/out/bin/jbpf_lcm_cli -l -c codeletset_load_request.yaml diff --git a/examples/first_example_standalone/run_app.sh b/examples/first_example_standalone/run_app.sh new file mode 100755 index 0000000..6b1b44e --- /dev/null +++ b/examples/first_example_standalone/run_app.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$JBPF_PATH/out/lib ./example_app diff --git a/examples/first_example_standalone/run_decoder.sh b/examples/first_example_standalone/run_decoder.sh new file mode 100755 index 0000000..0458541 --- /dev/null +++ b/examples/first_example_standalone/run_decoder.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +$JBPFP_PATH/pkg/jbpf_protobuf_cli decoder run diff --git a/examples/first_example_standalone/schema.options b/examples/first_example_standalone/schema.options new file mode 100644 index 0000000..618bb99 --- /dev/null +++ b/examples/first_example_standalone/schema.options @@ -0,0 +1 @@ +packet.name max_size:32 diff --git a/examples/first_example_standalone/schema.proto b/examples/first_example_standalone/schema.proto new file mode 100644 index 0000000..b3db3d3 --- /dev/null +++ b/examples/first_example_standalone/schema.proto @@ -0,0 +1,9 @@ +syntax = "proto2"; + +message packet { + required int32 seq_no = 1; + required int32 value = 2; + required string name = 3; +} + +message manual_ctrl_event { required int32 value = 1; } diff --git a/examples/first_example_standalone/send_input_msg.sh b/examples/first_example_standalone/send_input_msg.sh new file mode 100755 index 0000000..b00c4df --- 
/dev/null +++ b/examples/first_example_standalone/send_input_msg.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +$JBPFP_PATH/pkg/jbpf_protobuf_cli input forward \ + -c codeletset_load_request.yaml \ + --stream-id 11111111-1111-1111-1111-111111111111 \ + --inline-json "{\"value\": $1}" diff --git a/examples/first_example_standalone/unload.sh b/examples/first_example_standalone/unload.sh new file mode 100755 index 0000000..1b5e2b7 --- /dev/null +++ b/examples/first_example_standalone/unload.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +$JBPF_PATH/out/bin/jbpf_lcm_cli -u -c codeletset_unload_request.yaml + +$JBPFP_PATH/pkg/jbpf_protobuf_cli decoder unload -c codeletset_load_request.yaml diff --git a/init_submodules.sh b/init_submodules.sh new file mode 100755 index 0000000..e181355 --- /dev/null +++ b/init_submodules.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +rm -rf 3p/nanopb jbpf +git submodule update --init --recursive +cd jbpf +./init_and_patch_submodules.sh +cd .. \ No newline at end of file diff --git a/jbpf b/jbpf new file mode 160000 index 0000000..d821e42 --- /dev/null +++ b/jbpf @@ -0,0 +1 @@ +Subproject commit d821e42e56884ebdf5d66520ed2df30f136a90af diff --git a/pkg/.gitignore b/pkg/.gitignore new file mode 100644 index 0000000..899d913 --- /dev/null +++ b/pkg/.gitignore @@ -0,0 +1,2 @@ +*.pb.go +jbpf_protobuf_cli diff --git a/pkg/.golangci.yml b/pkg/.golangci.yml new file mode 100644 index 0000000..6b06203 --- /dev/null +++ b/pkg/.golangci.yml @@ -0,0 +1,21 @@ +# Refer to golangci-lint's example config file for more options and information: +# https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml + +run: + timeout: 5m + modules-download-mode: readonly + +linters: + enable: + - errcheck + - goimports + - govet + - revive + - staticcheck + +issues: + exclude-use-default: false + max-issues-per-linter: 0 + max-same-issues: 0 + exclude: + - "package-comments" diff --git a/pkg/Makefile b/pkg/Makefile new file mode 100644 index 0000000..c383f28 --- /dev/null +++ 
b/pkg/Makefile @@ -0,0 +1,32 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +BINARY_NAME := jbpf_protobuf_cli +CURRENT_DIR = $(shell pwd) +NANO_PB ?= $(shell dirname $(shell pwd))/3p/nanopb +TEST_WORKDIR ?= $(shell dirname $(shell pwd))/testdata +REGENERATE_SNAPSHOT ?= false +OUT_DIR ?= . + +.PHONY : mod clean lint test testclean + +${BINARY_NAME}: clean mod + CGO_ENABLED=0 go build --trimpath -o ${OUT_DIR}/${BINARY_NAME} main.go + +mod: + go mod tidy + +clean: + rm -f ${OUT_DIR}/${BINARY_NAME} + +lint: + golangci-lint run + +test: + TEST_WORKDIR=${TEST_WORKDIR} \ + NANO_PB=${NANO_PB} \ + SNAPSHOT_DIR=${CURRENT_DIR}/__snapshots__ \ + REGENERATE_SNAPSHOT=${REGENERATE_SNAPSHOT} \ + go test -v ./... + +testclean: + rm -r ${CURRENT_DIR}/__snapshots__/*; \ + go clean -testcache diff --git a/pkg/README.md b/pkg/README.md new file mode 100644 index 0000000..a54fc9c --- /dev/null +++ b/pkg/README.md @@ -0,0 +1,20 @@ +# Introduction + +`jbpf_protobuf_cli` is a cli tool to build serialization assets to use protobuf as a content encoding mechanism to send data to and from jbpf. 
+ +To build locally, run the following command: +```sh +make +``` + +To lint and test locally, run the following command: +```sh +make lint test -j +``` + +# Usage + +For detailed usage, build then run: +```sh +./jbpf_protobuf_cli --help +``` diff --git a/pkg/__snapshots__/example1/example.pb b/pkg/__snapshots__/example1/example.pb new file mode 100644 index 0000000..16170bb Binary files /dev/null and b/pkg/__snapshots__/example1/example.pb differ diff --git a/pkg/__snapshots__/example1/example.pb.c b/pkg/__snapshots__/example1/example.pb.c new file mode 100644 index 0000000..c20248f --- /dev/null +++ b/pkg/__snapshots__/example1/example.pb.c @@ -0,0 +1,26 @@ +/* Automatically generated nanopb constant definitions */ +/* Generated by nanopb-1.0.0-dev */ + +#include "example.pb.h" +#if PB_PROTO_HEADER_VERSION != 40 +#error Regenerate this file with the current version of nanopb generator. +#endif + +PB_BIND(my_struct, my_struct, AUTO) + + +PB_BIND(request, request, AUTO) + + +PB_BIND(response, response, AUTO) + + +PB_BIND(req_resp, req_resp, AUTO) + + +PB_BIND(status, status, AUTO) + + + + + diff --git a/pkg/__snapshots__/example1/example.pb.h b/pkg/__snapshots__/example1/example.pb.h new file mode 100644 index 0000000..96d4a5c --- /dev/null +++ b/pkg/__snapshots__/example1/example.pb.h @@ -0,0 +1,155 @@ +/* Automatically generated nanopb header */ +/* Generated by nanopb-1.0.0-dev */ + +#ifndef PB_EXAMPLE_PB_H_INCLUDED +#define PB_EXAMPLE_PB_H_INCLUDED +#include + +#if PB_PROTO_HEADER_VERSION != 40 +#error Regenerate this file with the current version of nanopb generator. 
+#endif + +/* Enum definitions */ +typedef enum _my_state { + my_state_GOOD = 0, + my_state_BAD = 1 +} my_state; + +/* Struct definitions */ +typedef struct _my_struct { + uint32_t a_num; + bool has_another_num; + uint32_t another_num; +} my_struct; + +typedef struct _request { + uint32_t id; + char name[32]; + bool has_state; + my_state state; +} request; + +typedef struct _response { + uint32_t id; + char msg[100]; +} response; + +typedef struct _req_resp { + pb_size_t which_req_or_resp; + union _req_resp_req_or_resp { + request req; + response resp; + } req_or_resp; +} req_resp; + +typedef struct _status { + uint32_t id; + char status[100]; + my_struct a_struct; +} status; + + +#ifdef __cplusplus +extern "C" { +#endif + +/* Helper constants for enums */ +#define _my_state_MIN my_state_GOOD +#define _my_state_MAX my_state_BAD +#define _my_state_ARRAYSIZE ((my_state)(my_state_BAD+1)) + + +#define request_state_ENUMTYPE my_state + + + + + +/* Initializer values for message structs */ +#define my_struct_init_default {0, false, 0} +#define request_init_default {0, "", false, _my_state_MIN} +#define response_init_default {0, ""} +#define req_resp_init_default {0, {request_init_default}} +#define status_init_default {0, "", my_struct_init_default} +#define my_struct_init_zero {0, false, 0} +#define request_init_zero {0, "", false, _my_state_MIN} +#define response_init_zero {0, ""} +#define req_resp_init_zero {0, {request_init_zero}} +#define status_init_zero {0, "", my_struct_init_zero} + +/* Field tags (for use in manual encoding/decoding) */ +#define my_struct_a_num_tag 1 +#define my_struct_another_num_tag 2 +#define request_id_tag 1 +#define request_name_tag 2 +#define request_state_tag 3 +#define response_id_tag 1 +#define response_msg_tag 2 +#define req_resp_req_tag 1 +#define req_resp_resp_tag 2 +#define status_id_tag 1 +#define status_status_tag 2 +#define status_a_struct_tag 3 + +/* Struct field encoding specification for nanopb */ +#define 
my_struct_FIELDLIST(X, a) \ +X(a, STATIC, REQUIRED, UINT32, a_num, 1) \ +X(a, STATIC, OPTIONAL, UINT32, another_num, 2) +#define my_struct_CALLBACK NULL +#define my_struct_DEFAULT NULL + +#define request_FIELDLIST(X, a) \ +X(a, STATIC, REQUIRED, UINT32, id, 1) \ +X(a, STATIC, REQUIRED, STRING, name, 2) \ +X(a, STATIC, OPTIONAL, UENUM, state, 3) +#define request_CALLBACK NULL +#define request_DEFAULT NULL + +#define response_FIELDLIST(X, a) \ +X(a, STATIC, REQUIRED, UINT32, id, 1) \ +X(a, STATIC, REQUIRED, STRING, msg, 2) +#define response_CALLBACK NULL +#define response_DEFAULT NULL + +#define req_resp_FIELDLIST(X, a) \ +X(a, STATIC, ONEOF, MESSAGE, (req_or_resp,req,req_or_resp.req), 1) \ +X(a, STATIC, ONEOF, MESSAGE, (req_or_resp,resp,req_or_resp.resp), 2) +#define req_resp_CALLBACK NULL +#define req_resp_DEFAULT NULL +#define req_resp_req_or_resp_req_MSGTYPE request +#define req_resp_req_or_resp_resp_MSGTYPE response + +#define status_FIELDLIST(X, a) \ +X(a, STATIC, REQUIRED, UINT32, id, 1) \ +X(a, STATIC, REQUIRED, STRING, status, 2) \ +X(a, STATIC, REQUIRED, MESSAGE, a_struct, 3) +#define status_CALLBACK NULL +#define status_DEFAULT NULL +#define status_a_struct_MSGTYPE my_struct + +extern const pb_msgdesc_t my_struct_msg; +extern const pb_msgdesc_t request_msg; +extern const pb_msgdesc_t response_msg; +extern const pb_msgdesc_t req_resp_msg; +extern const pb_msgdesc_t status_msg; + +/* Defines for backwards compatibility with code written before nanopb-0.4.0 */ +#define my_struct_fields &my_struct_msg +#define request_fields &request_msg +#define response_fields &response_msg +#define req_resp_fields &req_resp_msg +#define status_fields &status_msg + +/* Maximum encoded size of messages (where known) */ +#define EXAMPLE_PB_H_MAX_SIZE status_size +#define my_struct_size 12 +#define req_resp_size 109 +#define request_size 41 +#define response_size 107 +#define status_size 121 + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif diff --git 
a/pkg/__snapshots__/example1/example:req_resp_serializer.c b/pkg/__snapshots__/example1/example:req_resp_serializer.c new file mode 100644 index 0000000..00b1c1e --- /dev/null +++ b/pkg/__snapshots__/example1/example:req_resp_serializer.c @@ -0,0 +1,26 @@ +#define PB_FIELD_32BIT 1 +#include +#include +#include +#include "example.pb.h" + +const uint32_t proto_message_size = sizeof(req_resp); + +int jbpf_io_serialize(void* input_msg_buf, size_t input_msg_buf_size, char* serialized_data_buf, size_t serialized_data_buf_size) { + if (input_msg_buf_size != proto_message_size) + return -1; + + pb_ostream_t ostream = pb_ostream_from_buffer((uint8_t*)serialized_data_buf, serialized_data_buf_size); + if (!pb_encode(&ostream, req_resp_fields, input_msg_buf)) + return -1; + + return ostream.bytes_written; +} + +int jbpf_io_deserialize(char* serialized_data_buf, size_t serialized_data_buf_size, void* output_msg_buf, size_t output_msg_buf_size) { + if (output_msg_buf_size != proto_message_size) + return 0; + + pb_istream_t istream = pb_istream_from_buffer((uint8_t*)serialized_data_buf, serialized_data_buf_size); + return pb_decode(&istream, req_resp_fields, output_msg_buf); +} diff --git a/pkg/__snapshots__/example1/example:status_serializer.c b/pkg/__snapshots__/example1/example:status_serializer.c new file mode 100644 index 0000000..f9d74a1 --- /dev/null +++ b/pkg/__snapshots__/example1/example:status_serializer.c @@ -0,0 +1,26 @@ +#define PB_FIELD_32BIT 1 +#include +#include +#include +#include "example.pb.h" + +const uint32_t proto_message_size = sizeof(status); + +int jbpf_io_serialize(void* input_msg_buf, size_t input_msg_buf_size, char* serialized_data_buf, size_t serialized_data_buf_size) { + if (input_msg_buf_size != proto_message_size) + return -1; + + pb_ostream_t ostream = pb_ostream_from_buffer((uint8_t*)serialized_data_buf, serialized_data_buf_size); + if (!pb_encode(&ostream, status_fields, input_msg_buf)) + return -1; + + return ostream.bytes_written; +} + +int 
jbpf_io_deserialize(char* serialized_data_buf, size_t serialized_data_buf_size, void* output_msg_buf, size_t output_msg_buf_size) { + if (output_msg_buf_size != proto_message_size) + return 0; + + pb_istream_t istream = pb_istream_from_buffer((uint8_t*)serialized_data_buf, serialized_data_buf_size); + return pb_decode(&istream, status_fields, output_msg_buf); +} diff --git a/pkg/__snapshots__/example2/example2.pb b/pkg/__snapshots__/example2/example2.pb new file mode 100644 index 0000000..130042b --- /dev/null +++ b/pkg/__snapshots__/example2/example2.pb @@ -0,0 +1,6 @@ + +> +example2.proto", +item +name ( Rname +val ( Rval \ No newline at end of file diff --git a/pkg/__snapshots__/example2/example2.pb.c b/pkg/__snapshots__/example2/example2.pb.c new file mode 100644 index 0000000..481214a --- /dev/null +++ b/pkg/__snapshots__/example2/example2.pb.c @@ -0,0 +1,12 @@ +/* Automatically generated nanopb constant definitions */ +/* Generated by nanopb-1.0.0-dev */ + +#include "example2.pb.h" +#if PB_PROTO_HEADER_VERSION != 40 +#error Regenerate this file with the current version of nanopb generator. +#endif + +PB_BIND(item, item, AUTO) + + + diff --git a/pkg/__snapshots__/example2/example2.pb.h b/pkg/__snapshots__/example2/example2.pb.h new file mode 100644 index 0000000..62bea39 --- /dev/null +++ b/pkg/__snapshots__/example2/example2.pb.h @@ -0,0 +1,52 @@ +/* Automatically generated nanopb header */ +/* Generated by nanopb-1.0.0-dev */ + +#ifndef PB_EXAMPLE2_PB_H_INCLUDED +#define PB_EXAMPLE2_PB_H_INCLUDED +#include + +#if PB_PROTO_HEADER_VERSION != 40 +#error Regenerate this file with the current version of nanopb generator. 
+#endif + +/* Struct definitions */ +typedef struct _item { + char name[30]; + bool has_val; + uint32_t val; +} item; + + +#ifdef __cplusplus +extern "C" { +#endif + +/* Initializer values for message structs */ +#define item_init_default {"", false, 0} +#define item_init_zero {"", false, 0} + +/* Field tags (for use in manual encoding/decoding) */ +#define item_name_tag 1 +#define item_val_tag 2 + +/* Struct field encoding specification for nanopb */ +#define item_FIELDLIST(X, a) \ +X(a, STATIC, REQUIRED, STRING, name, 1) \ +X(a, STATIC, OPTIONAL, UINT32, val, 2) +#define item_CALLBACK NULL +#define item_DEFAULT NULL + +extern const pb_msgdesc_t item_msg; + +/* Defines for backwards compatibility with code written before nanopb-0.4.0 */ +#define item_fields &item_msg + +/* Maximum encoded size of messages (where known) */ +#define EXAMPLE2_PB_H_MAX_SIZE item_size +#define item_size 37 + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif diff --git a/pkg/__snapshots__/example2/example2:item_serializer.c b/pkg/__snapshots__/example2/example2:item_serializer.c new file mode 100644 index 0000000..24702e3 --- /dev/null +++ b/pkg/__snapshots__/example2/example2:item_serializer.c @@ -0,0 +1,26 @@ +#define PB_FIELD_32BIT 1 +#include +#include +#include +#include "example2.pb.h" + +const uint32_t proto_message_size = sizeof(item); + +int jbpf_io_serialize(void* input_msg_buf, size_t input_msg_buf_size, char* serialized_data_buf, size_t serialized_data_buf_size) { + if (input_msg_buf_size != proto_message_size) + return -1; + + pb_ostream_t ostream = pb_ostream_from_buffer((uint8_t*)serialized_data_buf, serialized_data_buf_size); + if (!pb_encode(&ostream, item_fields, input_msg_buf)) + return -1; + + return ostream.bytes_written; +} + +int jbpf_io_deserialize(char* serialized_data_buf, size_t serialized_data_buf_size, void* output_msg_buf, size_t output_msg_buf_size) { + if (output_msg_buf_size != proto_message_size) + return 0; + + pb_istream_t istream = 
pb_istream_from_buffer((uint8_t*)serialized_data_buf, serialized_data_buf_size); + return pb_decode(&istream, item_fields, output_msg_buf); +} diff --git a/pkg/__snapshots__/example3/example3.pb b/pkg/__snapshots__/example3/example3.pb new file mode 100644 index 0000000..e5e3166 --- /dev/null +++ b/pkg/__snapshots__/example3/example3.pb @@ -0,0 +1,31 @@ + + +example3.proto"ý +obj +bval (Rbval +bytesval ( Rbytesval +dval (Rdval +f32val (Rf32val +f64val (Rf64val +i32val (Ri32val +i64val (Ri64val +sf32val (Rsf32val +sf64val (Rsf64val +si32val + (Rsi32val +si64val (Rsi64val +sval ( Rsval +ui32val ( Rui32val +ui64val (Rui64val +barr (Rbarr +darr (Rdarr +f32arr (Rf32arr +f64arr (Rf64arr +i32arr (Ri32arr +i64arr (Ri64arr +sf32arr (Rsf32arr +sf64arr (Rsf64arr +si32arr (Rsi32arr +si64arr (Rsi64arr +ui32arr ( Rui32arr +ui64arr (Rui64arr \ No newline at end of file diff --git a/pkg/__snapshots__/example3/example3.pb.c b/pkg/__snapshots__/example3/example3.pb.c new file mode 100644 index 0000000..35209fe --- /dev/null +++ b/pkg/__snapshots__/example3/example3.pb.c @@ -0,0 +1,20 @@ +/* Automatically generated nanopb constant definitions */ +/* Generated by nanopb-1.0.0-dev */ + +#include "example3.pb.h" +#if PB_PROTO_HEADER_VERSION != 40 +#error Regenerate this file with the current version of nanopb generator. +#endif + +PB_BIND(obj, obj, 2) + + + +#ifndef PB_CONVERT_DOUBLE_FLOAT +/* On some platforms (such as AVR), double is really float. + * To be able to encode/decode double on these platforms, you need. + * to define PB_CONVERT_DOUBLE_FLOAT in pb.h or compiler command line. 
+ */ +PB_STATIC_ASSERT(sizeof(double) == 8, DOUBLE_MUST_BE_8_BYTES) +#endif + diff --git a/pkg/__snapshots__/example3/example3.pb.h b/pkg/__snapshots__/example3/example3.pb.h new file mode 100644 index 0000000..5428ff1 --- /dev/null +++ b/pkg/__snapshots__/example3/example3.pb.h @@ -0,0 +1,136 @@ +/* Automatically generated nanopb header */ +/* Generated by nanopb-1.0.0-dev */ + +#ifndef PB_EXAMPLE3_PB_H_INCLUDED +#define PB_EXAMPLE3_PB_H_INCLUDED +#include + +#if PB_PROTO_HEADER_VERSION != 40 +#error Regenerate this file with the current version of nanopb generator. +#endif + +/* Struct definitions */ +typedef PB_BYTES_ARRAY_T(20) obj_bytesval_t; +typedef struct _obj { + bool bval; + obj_bytesval_t bytesval; + double dval; + uint32_t f32val; + uint64_t f64val; + int32_t i32val; + int64_t i64val; + int32_t sf32val; + int64_t sf64val; + int32_t si32val; + int64_t si64val; + char sval[20]; + uint32_t ui32val; + uint64_t ui64val; + pb_size_t barr_count; + bool barr[10]; + pb_size_t darr_count; + double darr[10]; + pb_size_t f32arr_count; + uint32_t f32arr[10]; + pb_size_t f64arr_count; + uint64_t f64arr[10]; + pb_size_t i32arr_count; + int32_t i32arr[10]; + pb_size_t i64arr_count; + int64_t i64arr[10]; + pb_size_t sf32arr_count; + int32_t sf32arr[10]; + pb_size_t sf64arr_count; + int64_t sf64arr[10]; + pb_size_t si32arr_count; + int32_t si32arr[10]; + pb_size_t si64arr_count; + int64_t si64arr[10]; + pb_size_t ui32arr_count; + uint32_t ui32arr[10]; + pb_size_t ui64arr_count; + uint64_t ui64arr[10]; +} obj; + + +#ifdef __cplusplus +extern "C" { +#endif + +/* Initializer values for message structs */ +#define obj_init_default {0, {0, {0}}, 0, 0, 0, 0, 0, 0, 0, 0, 0, "", 0, 0, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, {0, 0, 0, 0, 0, 
0, 0, 0, 0, 0}, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} +#define obj_init_zero {0, {0, {0}}, 0, 0, 0, 0, 0, 0, 0, 0, 0, "", 0, 0, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + +/* Field tags (for use in manual encoding/decoding) */ +#define obj_bval_tag 1 +#define obj_bytesval_tag 2 +#define obj_dval_tag 3 +#define obj_f32val_tag 4 +#define obj_f64val_tag 5 +#define obj_i32val_tag 6 +#define obj_i64val_tag 7 +#define obj_sf32val_tag 8 +#define obj_sf64val_tag 9 +#define obj_si32val_tag 10 +#define obj_si64val_tag 11 +#define obj_sval_tag 12 +#define obj_ui32val_tag 13 +#define obj_ui64val_tag 14 +#define obj_barr_tag 15 +#define obj_darr_tag 16 +#define obj_f32arr_tag 17 +#define obj_f64arr_tag 18 +#define obj_i32arr_tag 19 +#define obj_i64arr_tag 20 +#define obj_sf32arr_tag 21 +#define obj_sf64arr_tag 22 +#define obj_si32arr_tag 23 +#define obj_si64arr_tag 24 +#define obj_ui32arr_tag 25 +#define obj_ui64arr_tag 26 + +/* Struct field encoding specification for nanopb */ +#define obj_FIELDLIST(X, a) \ +X(a, STATIC, REQUIRED, BOOL, bval, 1) \ +X(a, STATIC, REQUIRED, BYTES, bytesval, 2) \ +X(a, STATIC, REQUIRED, DOUBLE, dval, 3) \ +X(a, STATIC, REQUIRED, FIXED32, f32val, 4) \ +X(a, STATIC, REQUIRED, FIXED64, f64val, 5) \ +X(a, STATIC, REQUIRED, INT32, i32val, 6) \ +X(a, STATIC, REQUIRED, INT64, i64val, 7) \ +X(a, STATIC, REQUIRED, SFIXED32, sf32val, 8) \ +X(a, STATIC, REQUIRED, SFIXED64, sf64val, 9) \ +X(a, STATIC, REQUIRED, SINT32, si32val, 10) \ +X(a, STATIC, REQUIRED, SINT64, si64val, 11) \ +X(a, STATIC, REQUIRED, STRING, sval, 12) 
\ +X(a, STATIC, REQUIRED, UINT32, ui32val, 13) \ +X(a, STATIC, REQUIRED, UINT64, ui64val, 14) \ +X(a, STATIC, REPEATED, BOOL, barr, 15) \ +X(a, STATIC, REPEATED, DOUBLE, darr, 16) \ +X(a, STATIC, REPEATED, FIXED32, f32arr, 17) \ +X(a, STATIC, REPEATED, FIXED64, f64arr, 18) \ +X(a, STATIC, REPEATED, INT32, i32arr, 19) \ +X(a, STATIC, REPEATED, INT64, i64arr, 20) \ +X(a, STATIC, REPEATED, SFIXED32, sf32arr, 21) \ +X(a, STATIC, REPEATED, SFIXED64, sf64arr, 22) \ +X(a, STATIC, REPEATED, SINT32, si32arr, 23) \ +X(a, STATIC, REPEATED, SINT64, si64arr, 24) \ +X(a, STATIC, REPEATED, UINT32, ui32arr, 25) \ +X(a, STATIC, REPEATED, UINT64, ui64arr, 26) +#define obj_CALLBACK NULL +#define obj_DEFAULT NULL + +extern const pb_msgdesc_t obj_msg; + +/* Defines for backwards compatibility with code written before nanopb-0.4.0 */ +#define obj_fields &obj_msg + +/* Maximum encoded size of messages (where known) */ +#define EXAMPLE3_PB_H_MAX_SIZE obj_size +#define obj_size 1198 + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif diff --git a/pkg/__snapshots__/example3/example3:obj_serializer.c b/pkg/__snapshots__/example3/example3:obj_serializer.c new file mode 100644 index 0000000..f66f508 --- /dev/null +++ b/pkg/__snapshots__/example3/example3:obj_serializer.c @@ -0,0 +1,26 @@ +#define PB_FIELD_32BIT 1 +#include +#include +#include +#include "example3.pb.h" + +const uint32_t proto_message_size = sizeof(obj); + +int jbpf_io_serialize(void* input_msg_buf, size_t input_msg_buf_size, char* serialized_data_buf, size_t serialized_data_buf_size) { + if (input_msg_buf_size != proto_message_size) + return -1; + + pb_ostream_t ostream = pb_ostream_from_buffer((uint8_t*)serialized_data_buf, serialized_data_buf_size); + if (!pb_encode(&ostream, obj_fields, input_msg_buf)) + return -1; + + return ostream.bytes_written; +} + +int jbpf_io_deserialize(char* serialized_data_buf, size_t serialized_data_buf_size, void* output_msg_buf, size_t output_msg_buf_size) { + if (output_msg_buf_size != 
proto_message_size) + return 0; + + pb_istream_t istream = pb_istream_from_buffer((uint8_t*)serialized_data_buf, serialized_data_buf_size); + return pb_decode(&istream, obj_fields, output_msg_buf); +} diff --git a/pkg/cmd/decoder/decoder.go b/pkg/cmd/decoder/decoder.go new file mode 100644 index 0000000..26ea3a3 --- /dev/null +++ b/pkg/cmd/decoder/decoder.go @@ -0,0 +1,27 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. + +package decoder + +import ( + "jbpf_protobuf_cli/cmd/decoder/load" + "jbpf_protobuf_cli/cmd/decoder/run" + "jbpf_protobuf_cli/cmd/decoder/unload" + "jbpf_protobuf_cli/common" + + "github.com/spf13/cobra" +) + +// Command returns the decoder commands +func Command(opts *common.GeneralOptions) *cobra.Command { + cmd := &cobra.Command{ + Use: "decoder", + Long: "Execute a decoder subcommand.", + Short: "Execute a decoder subcommand", + } + cmd.AddCommand( + load.Command(opts), + unload.Command(opts), + run.Command(opts), + ) + return cmd +} diff --git a/pkg/cmd/decoder/load/load.go b/pkg/cmd/decoder/load/load.go new file mode 100644 index 0000000..03f88d5 --- /dev/null +++ b/pkg/cmd/decoder/load/load.go @@ -0,0 +1,97 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. + +package load + +import ( + "errors" + "jbpf_protobuf_cli/common" + "jbpf_protobuf_cli/schema" + + "github.com/google/uuid" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type runOptions struct { + decoderAPI *schema.Options + general *common.GeneralOptions + + compiledProtos map[string]*common.File + configFiles []string + configs []*common.CodeletsetConfig +} + +func addToFlags(flags *pflag.FlagSet, opts *runOptions) { + flags.StringArrayVarP(&opts.configFiles, "config", "c", []string{}, "configuration files to load") +} + +func (o *runOptions) parse() (err error) { + o.configs, err = common.CodeletsetConfigFromFiles(o.configFiles...) 
+ if err != nil { + return + } + o.compiledProtos, err = common.LoadCompiledProtos(o.configs, false, true) + return +} + +// Command Load a schema to a local decoder +func Command(opts *common.GeneralOptions) *cobra.Command { + runOptions := &runOptions{ + decoderAPI: &schema.Options{}, + general: opts, + } + cmd := &cobra.Command{ + Use: "load", + Short: "Load a schema to a local decoder", + Long: "Load a schema to a local decoder", + RunE: func(cmd *cobra.Command, _ []string) error { + return run(cmd, runOptions) + }, + SilenceUsage: true, + } + addToFlags(cmd.PersistentFlags(), runOptions) + schema.AddOptionsToFlags(cmd.PersistentFlags(), runOptions.decoderAPI) + return cmd +} + +func run(cmd *cobra.Command, opts *runOptions) error { + if err := errors.Join( + opts.general.Parse(), + opts.decoderAPI.Parse(), + opts.parse(), + ); err != nil { + return err + } + + logger := opts.general.Logger + + client, err := schema.NewClient(cmd.Context(), logger, opts.decoderAPI) + if err != nil { + return err + } + + schemas := make(map[string]*schema.LoadRequest) + + for _, config := range opts.configs { + for _, desc := range config.CodeletDescriptor { + for _, io := range desc.OutIOChannel { + if existing, ok := schemas[io.Serde.Protobuf.PackageName]; ok { + existing.Streams[io.StreamUUID] = io.Serde.Protobuf.MsgName + } else { + compiledProto, ok := opts.compiledProtos[io.Serde.Protobuf.PackagePath] + if !ok { + return errors.New("compiled proto not found") + } + schemas[io.Serde.Protobuf.PackageName] = &schema.LoadRequest{ + CompiledProto: compiledProto.Data, + Streams: map[uuid.UUID]string{ + io.StreamUUID: io.Serde.Protobuf.MsgName, + }, + } + } + } + } + } + + return client.Load(schemas) +} diff --git a/pkg/cmd/decoder/run/run.go b/pkg/cmd/decoder/run/run.go new file mode 100644 index 0000000..1e96e0e --- /dev/null +++ b/pkg/cmd/decoder/run/run.go @@ -0,0 +1,79 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+ +package run + +import ( + "errors" + "jbpf_protobuf_cli/common" + "jbpf_protobuf_cli/data" + "jbpf_protobuf_cli/schema" + + "github.com/google/uuid" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "golang.org/x/sync/errgroup" +) + +type runOptions struct { + general *common.GeneralOptions + data *data.ServerOptions + decoderAPI *schema.Options +} + +// Command Run decoder to collect, decode and print jbpf output +func Command(opts *common.GeneralOptions) *cobra.Command { + runOptions := &runOptions{ + general: opts, + data: &data.ServerOptions{}, + decoderAPI: &schema.Options{}, + } + cmd := &cobra.Command{ + Use: "run", + Short: "Run decoder to collect, decode and print jbpf output", + Long: "Run dynamic protobuf decoder to collect, decode and print jbpf output.", + RunE: func(cmd *cobra.Command, _ []string) error { + return run(cmd, runOptions) + }, + SilenceUsage: true, + } + schema.AddOptionsToFlags(cmd.PersistentFlags(), runOptions.decoderAPI) + data.AddServerOptionsToFlags(cmd.PersistentFlags(), runOptions.data) + return cmd +} + +func run(cmd *cobra.Command, opts *runOptions) error { + if err := errors.Join( + opts.general.Parse(), + opts.data.Parse(), + opts.decoderAPI.Parse(), + ); err != nil { + return err + } + + logger := opts.general.Logger + + store := schema.NewStore() + + schemaServer := schema.NewServer(cmd.Context(), logger, opts.decoderAPI, store) + + dataServer, err := data.NewServer(cmd.Context(), logger, opts.data, store) + if err != nil { + return err + } + + g, _ := errgroup.WithContext(cmd.Context()) + + g.Go(func() error { + return dataServer.Listen(func(streamUUID uuid.UUID, data []byte) { + logger.WithFields(logrus.Fields{ + "streamUUID": streamUUID.String(), + }).Info(string(data)) + }) + }) + + g.Go(func() error { + return schemaServer.Serve() + }) + + return g.Wait() +} diff --git a/pkg/cmd/decoder/unload/unload.go b/pkg/cmd/decoder/unload/unload.go new file mode 100644 index 0000000..6bf82cf --- /dev/null +++ 
b/pkg/cmd/decoder/unload/unload.go @@ -0,0 +1,83 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. + +package unload + +import ( + "errors" + "jbpf_protobuf_cli/common" + "jbpf_protobuf_cli/schema" + + "github.com/google/uuid" + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +const ( + maxStreamUUIDs = 1024 +) + +type runOptions struct { + decoderAPI *schema.Options + general *common.GeneralOptions + + configFiles []string + configs []*common.CodeletsetConfig +} + +func addToFlags(flags *pflag.FlagSet, opts *runOptions) { + flags.StringArrayVarP(&opts.configFiles, "config", "c", []string{}, "configuration files to unload") +} + +func (o *runOptions) parse() (err error) { + o.configs, err = common.CodeletsetConfigFromFiles(o.configFiles...) + return +} + +// Command Unload a schema from a local decoder +func Command(opts *common.GeneralOptions) *cobra.Command { + runOptions := &runOptions{ + decoderAPI: &schema.Options{}, + general: opts, + } + cmd := &cobra.Command{ + Use: "unload", + Short: "Unload a schema from a local decoder", + Long: "Unload a schema from a local decoder", + RunE: func(cmd *cobra.Command, _ []string) error { + return run(cmd, runOptions) + }, + SilenceUsage: true, + } + addToFlags(cmd.PersistentFlags(), runOptions) + schema.AddOptionsToFlags(cmd.PersistentFlags(), runOptions.decoderAPI) + return cmd +} + +func run(cmd *cobra.Command, opts *runOptions) error { + if err := errors.Join( + opts.general.Parse(), + opts.decoderAPI.Parse(), + opts.parse(), + ); err != nil { + return err + } + + logger := opts.general.Logger + + client, err := schema.NewClient(cmd.Context(), logger, opts.decoderAPI) + if err != nil { + return err + } + + streamUUIDs := make([]uuid.UUID, 0, maxStreamUUIDs) + + for _, config := range opts.configs { + for _, desc := range config.CodeletDescriptor { + for _, io := range desc.OutIOChannel { + streamUUIDs = append(streamUUIDs, io.StreamUUID) + } + } + } + + return client.Unload(streamUUIDs) +} diff 
--git a/pkg/cmd/input/forward/forward.go b/pkg/cmd/input/forward/forward.go new file mode 100644 index 0000000..8b08e1d --- /dev/null +++ b/pkg/cmd/input/forward/forward.go @@ -0,0 +1,194 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. + +package forward + +import ( + "encoding/json" + "errors" + "fmt" + "jbpf_protobuf_cli/common" + "jbpf_protobuf_cli/jbpf" + "os" + + "github.com/google/uuid" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/dynamicpb" +) + +type runOptions struct { + jbpf *jbpf.Options + general *common.GeneralOptions + + compiledProtos map[string]*common.File + configFiles []string + configs []*common.CodeletsetConfig + filePath string + inlineJSON string + payload string + streamID string + streamUUID uuid.UUID +} + +func addToFlags(flags *pflag.FlagSet, opts *runOptions) { + flags.StringArrayVarP(&opts.configFiles, "config", "c", []string{}, "configuration files to load") + flags.StringVar(&opts.streamID, "stream-id", "00000000-0000-0000-0000-000000000000", "stream ID") + flags.StringVarP(&opts.filePath, "file", "f", "", "path to file containing payload in JSON format") + flags.StringVarP(&opts.inlineJSON, "inline-json", "j", "", "inline payload in JSON format") +} + +func (o *runOptions) parse() (err error) { + o.payload, err = loadInlineJSONOrFromFile(o.inlineJSON, o.filePath) + if err != nil { + return + } + + o.streamUUID, err = uuid.Parse(o.streamID) + if err != nil { + return + } + + o.configs, err = common.CodeletsetConfigFromFiles(o.configFiles...) 
+ if err != nil { + return + } + + o.compiledProtos, err = common.LoadCompiledProtos(o.configs, true, false) + return +} + +// Command Load a schema to a local decoder +func Command(opts *common.GeneralOptions) *cobra.Command { + runOptions := &runOptions{ + jbpf: &jbpf.Options{}, + general: opts, + } + cmd := &cobra.Command{ + Use: "forward", + Short: "Load a control message", + Long: "Load a control message", + RunE: func(cmd *cobra.Command, _ []string) error { + return run(cmd, runOptions) + }, + SilenceUsage: true, + } + addToFlags(cmd.PersistentFlags(), runOptions) + jbpf.AddOptionsToFlags(cmd.PersistentFlags(), runOptions.jbpf) + return cmd +} + +func run(_ *cobra.Command, opts *runOptions) error { + if err := errors.Join( + opts.general.Parse(), + opts.jbpf.Parse(), + opts.parse(), + ); err != nil { + return err + } + + logger := opts.general.Logger + + client, err := jbpf.NewClient(logger, opts.jbpf) + if err != nil { + return err + } + + msg, err := getMessageInstance(opts.configs, opts.compiledProtos, opts.streamUUID) + if err != nil { + return err + } + + err = protojson.Unmarshal([]byte(opts.payload), msg) + if err != nil { + logger.WithError(err).Error("error unmarshalling payload") + return err + } + + logger.WithFields(logrus.Fields{ + "msg": fmt.Sprintf("%T - \"%v\"", msg, msg), + }).Info("sending msg") + + payload, err := proto.Marshal(msg) + if err != nil { + return err + } + + out := append(opts.streamUUID[:], payload...) 
+ + return client.Write(out) +} + +func loadInlineJSONOrFromFile(inlineJSON, filePath string) (string, error) { + if (len(inlineJSON) > 0 && len(filePath) > 0) || (len(inlineJSON) == 0 && len(filePath) == 0) { + return "", errors.New("exactly one of --file or --inline-json can be specified") + } + + if len(filePath) != 0 { + if fi, err := os.Stat(filePath); err != nil { + return "", err + } else if fi.IsDir() { + return "", fmt.Errorf(`expected "%s" to be a file, got a directory`, filePath) + } + payload, err := os.ReadFile(filePath) + if err != nil { + return "", err + } + var deserializedPayload interface{} + err = json.Unmarshal(payload, &deserializedPayload) + if err != nil { + return "", err + } + return string(payload), nil + } + + var deserializedPayload interface{} + err := json.Unmarshal([]byte(inlineJSON), &deserializedPayload) + if err != nil { + return "", err + } + return inlineJSON, nil +} + +func getMessageInstance(configs []*common.CodeletsetConfig, compiledProtos map[string]*common.File, streamUUID uuid.UUID) (*dynamicpb.Message, error) { + for _, config := range configs { + for _, desc := range config.CodeletDescriptor { + for _, io := range desc.InIOChannel { + if io.StreamUUID == streamUUID { + compiledProto := compiledProtos[io.Serde.Protobuf.PackagePath] + + fds := &descriptorpb.FileDescriptorSet{} + if err := proto.Unmarshal(compiledProto.Data, fds); err != nil { + return nil, err + } + + pd, err := protodesc.NewFiles(fds) + if err != nil { + return nil, err + } + + msgName := protoreflect.FullName(io.Serde.Protobuf.MsgName) + var desc protoreflect.Descriptor + desc, err = pd.FindDescriptorByName(msgName) + if err != nil { + return nil, err + } + + md, ok := desc.(protoreflect.MessageDescriptor) + if !ok { + return nil, fmt.Errorf("failed to cast desc to protoreflect.MessageDescriptor, got %T", desc) + } + + return dynamicpb.NewMessage(md), nil + } + } + } + } + + return nil, fmt.Errorf("stream %s not found in any of the loaded schemas", 
streamUUID) +} diff --git a/pkg/cmd/input/input.go b/pkg/cmd/input/input.go new file mode 100644 index 0000000..1c3aa42 --- /dev/null +++ b/pkg/cmd/input/input.go @@ -0,0 +1,23 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. + +package input + +import ( + "jbpf_protobuf_cli/cmd/input/forward" + "jbpf_protobuf_cli/common" + + "github.com/spf13/cobra" +) + +// Command returns the input commands +func Command(opts *common.GeneralOptions) *cobra.Command { + cmd := &cobra.Command{ + Use: "input", + Long: "Execute a jbpf input subcommand.", + Short: "Execute a jbpf input subcommand", + } + cmd.AddCommand( + forward.Command(opts), + ) + return cmd +} diff --git a/pkg/cmd/serde/serde.go b/pkg/cmd/serde/serde.go new file mode 100644 index 0000000..789be30 --- /dev/null +++ b/pkg/cmd/serde/serde.go @@ -0,0 +1,157 @@ +package serde + +import ( + "errors" + "fmt" + "jbpf_protobuf_cli/common" + "jbpf_protobuf_cli/generator/nanopb" + "jbpf_protobuf_cli/generator/schema" + "log" + "os" + "path/filepath" + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +const ( + relativeWorkingDir = "./" +) + +var ( + originalBaseDir string +) + +type parsedProtoConfig struct { + protoPackageName string + protoMessageNames []string +} + +type runOptions struct { + general *common.GeneralOptions + + absOutputDir string + absWorkingDir string + outputDir string + protoConfigs []string + schemas []*parsedProtoConfig + workingDir string +} + +func init() { + var err error + originalBaseDir, err = filepath.Abs(originalBaseDir) + if err != nil { + log.Fatal(err) + } +} + +func addToFlags(flags *pflag.FlagSet, opts *runOptions) { + flags.StringArrayVarP(&opts.protoConfigs, "schema", "s", []string{}, `source proto file(s), along with any message names. 
In the form "{proto package name}:{proto message names,}"`) + flags.StringVarP(&opts.outputDir, "output-dir", "o", relativeWorkingDir, "output directory, will default to the current directory") + flags.StringVarP(&opts.workingDir, "workdir", "w", relativeWorkingDir, "working directory, will default to the current directory") +} + +func validateDir(absPath string) error { + fi, err := os.Stat(absPath) + if err != nil { + return err + } else if !fi.IsDir() { + return fmt.Errorf(`Expected "%s" to be a directory`, absPath) + } + return nil +} + +func (o *runOptions) parse() error { + var err1, err2 error + o.absOutputDir, err1 = filepath.Abs(o.outputDir) + o.absWorkingDir, err2 = filepath.Abs(o.workingDir) + + if err := errors.Join(err1, err2); err != nil { + return err + } + + if err := errors.Join(validateDir(o.absOutputDir), validateDir(o.absWorkingDir)); err != nil { + return err + } + + o.schemas = make([]*parsedProtoConfig, len(o.protoConfigs)) + for i, s := range o.protoConfigs { + parts := strings.Split(s, ":") + if len(parts) != 2 { + return errors.New("invalid schema format") + } + protoPackageName := strings.TrimSpace(parts[0]) + if len(protoPackageName) == 0 { + return errors.New("invalid schema format") + } + protoMessageNames := make([]string, 0) + if len(parts[1]) > 0 { + protoMessageNames = strings.Split(parts[1], ",") + for i := range protoMessageNames { + protoMessageNames[i] = strings.TrimSpace(protoMessageNames[i]) + if len(protoMessageNames[i]) == 0 { + return errors.New("invalid schema format") + } + } + } + + o.schemas[i] = &parsedProtoConfig{ + protoPackageName: protoPackageName, + protoMessageNames: protoMessageNames, + } + } + + return nil +} + +// Command Generate serde assets for protobuf spec +func Command(opts *common.GeneralOptions) *cobra.Command { + runOptions := &runOptions{ + general: opts, + } + cmd := &cobra.Command{ + Use: "serde", + Short: "Generate serde assets for protobuf spec", + RunE: func(cmd *cobra.Command, _ []string) 
error { + return run(cmd, runOptions) + }, + SilenceUsage: true, + } + addToFlags(cmd.PersistentFlags(), runOptions) + return cmd +} + +func run(cmd *cobra.Command, opts *runOptions) error { + if err := errors.Join( + opts.general.Parse(), + opts.parse(), + ); err != nil { + return err + } + + logger := opts.general.Logger + + for _, cfg := range opts.schemas { + fileCfgs, err := nanopb.FindFiles(logger, opts.absWorkingDir) + if err != nil { + return err + } + + files, err := schema.Generate(cmd.Context(), logger, &schema.Config{ + Files: fileCfgs, + ProtoPackageName: cfg.protoPackageName, + ProtoMessageNames: cfg.protoMessageNames, + }) + if err != nil { + return err + } + + if err := common.WriteFilesToDirectory(logger, opts.absOutputDir, files); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/cmd/serde/serde_test.go b/pkg/cmd/serde/serde_test.go new file mode 100644 index 0000000..b16d4d4 --- /dev/null +++ b/pkg/cmd/serde/serde_test.go @@ -0,0 +1,161 @@ +package serde + +import ( + "errors" + "fmt" + "io/fs" + "jbpf_protobuf_cli/common" + "log" + "os" + "path/filepath" + "testing" + + _ "embed" + + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var workdir = os.Getenv("TEST_WORKDIR") +var snapshotdir = os.Getenv("SNAPSHOT_DIR") +var generateSnapshot = os.Getenv("REGENERATE_SNAPSHOT") == "true" +var generalOpts *common.GeneralOptions + +func init() { + logger := logrus.New() + logger.SetLevel(logrus.InfoLevel) + generalOpts = common.NewGeneralOptionsFromLogger(logger) + + if workdir == "" { + log.Fatal(`"TEST_WORKDIR" not set`) + } + if snapshotdir == "" { + log.Fatal(`"SNAPSHOT_DIR" not set`) + } + err := errors.Join( + verifyDirExists(workdir, false), + verifyDirExists(snapshotdir, true), + ) + if err != nil { + log.Fatal(err) + } + +} + +func verifyDirExists(dir string, createIfNotExists bool) error { + f, err := os.Stat(dir) + if err != nil && 
os.IsNotExist(err) && createIfNotExists { + if err := os.Mkdir(dir, 0755); err != nil { + return err + } + } else if err != nil { + return err + } else if !f.IsDir() { + return fmt.Errorf("%s is not a directory", dir) + } + return nil +} + +func snapshotTest(t *testing.T, snapshotDir, outDir string, cmd *cobra.Command) { + err := cmd.Execute() + require.NoError(t, err) + + cFiles, err := filepath.Glob(outDir + "/*.c") + require.NoError(t, err) + hFiles, err := filepath.Glob(outDir + "/*.h") + require.NoError(t, err) + pbFiles, err := filepath.Glob(outDir + "/*.pb") + require.NoError(t, err) + + outDirFiles := append(cFiles, hFiles...) + outDirFiles = append(outDirFiles, pbFiles...) + + for _, file := range outDirFiles { + baseName := filepath.Base(file) + snapshotFile := filepath.Join(snapshotDir, baseName) + + if generateSnapshot { + require.NoError(t, moveFile(file, snapshotFile)) + } else { + newFile, err := os.ReadFile(file) + require.NoError(t, err) + snapshotFile, err := os.ReadFile(snapshotFile) + require.NoError(t, err) + assert.Equal(t, snapshotFile, newFile, "file %s does not match snapshot", baseName) + } + } +} + +func moveFile(source, destination string) error { + fi, err := os.Stat(source) + if err != nil { + return err + } else if fi.IsDir() { + return fmt.Errorf("expected %s to be a file, got dir", source) + } + fileMod := fi.Mode() + data, err := os.ReadFile(source) + if err != nil { + return err + } + if err := os.Remove(source); err != nil { + return err + } + + newFi, err := os.Stat(destination) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return err + } else if err == nil && newFi.IsDir() { + return fmt.Errorf("expected %s to be a file, got dir", destination) + } else if err == nil { + if err := os.Remove(destination); err != nil { + return err + } + } + destFile, err := os.Create(destination) + if err != nil { + return err + } + defer func() { + if err := destFile.Close(); err != nil { + fmt.Printf("failed to close destination file 
%s when moving\n", destination) + } + }() + + n, err := destFile.Write(data) + if n != len(data) { + return fmt.Errorf("failed to write entire file %s, wrote %d of %d bytes, with err %v", destination, n, len(data), err) + } else if err != nil { + return err + } + + return os.Chmod(destination, fileMod) +} + +func TestCases(t *testing.T) { + testArgs := map[string][]string{ + "example1": {"-s", "example:req_resp,status", "-w", filepath.Join(workdir, "example1")}, + "example2": {"-s", "example2:item", "-w", filepath.Join(workdir, "example2")}, + "example3": {"-s", "example3:obj", "-w", filepath.Join(workdir, "example3")}, + } + + for exampleName, testArgs := range testArgs { + t.Run(exampleName, func(t *testing.T) { + outDir, err := os.MkdirTemp("", exampleName) + require.NoError(t, err) + defer func() { + if err := os.RemoveAll(outDir); err != nil { + t.Logf("failed to remove outDir: %s", err) + } + }() + snapshotDir := filepath.Join(snapshotdir, exampleName) + err = verifyDirExists(snapshotDir, true) + require.NoError(t, err) + cmd := Command(generalOpts) + cmd.SetArgs(append(testArgs, "-o", outDir)) + snapshotTest(t, snapshotDir, outDir, cmd) + }) + } +} diff --git a/pkg/common/codeletset_config.go b/pkg/common/codeletset_config.go new file mode 100644 index 0000000..fd0be50 --- /dev/null +++ b/pkg/common/codeletset_config.go @@ -0,0 +1,188 @@ +package common + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/google/uuid" +) + +// ProtobufConfig represents the configuration for a protobuf message +type ProtobufConfig struct { + MsgName string + PackageName string + PackagePath string +} + +func newProtobufConfig(cfg *ProtobufRawConfig) (*ProtobufConfig, error) { + if len(cfg.MsgName) == 0 { + return nil, fmt.Errorf("missing required field serde.protobuf.msg_name") + } + if len(cfg.PackagePath) == 0 { + return nil, fmt.Errorf("missing required field serde.protobuf.package_path") + } + + packagePath := os.ExpandEnv(cfg.PackagePath) + 
basename := filepath.Base(packagePath) + + return &ProtobufConfig{ + MsgName: cfg.MsgName, + PackageName: strings.TrimSuffix(basename, filepath.Ext(basename)), + PackagePath: packagePath, + }, nil +} + +// SerdeConfig represents the configuration for serialize/deserialize +type SerdeConfig struct { + Protobuf *ProtobufConfig +} + +func newSerdeConfig(cfg *SerdeRawConfig) (*SerdeConfig, error) { + if cfg.Protobuf == nil { + return nil, fmt.Errorf("missing required field serde.protobuf") + } + + protobuf, err := newProtobufConfig(cfg.Protobuf) + if err != nil { + return nil, err + } + + return &SerdeConfig{Protobuf: protobuf}, nil +} + +// IOChannelConfig represents the configuration for an IO channel +type IOChannelConfig struct { + Serde *SerdeConfig + StreamUUID uuid.UUID +} + +func newIOChannelConfig(cfg *IOChannelRawConfig) (*IOChannelConfig, error) { + if cfg.Serde == nil { + return nil, fmt.Errorf("missing required field serde") + } + + serde, err := newSerdeConfig(cfg.Serde) + if err != nil { + return nil, err + } + + streamUUID, err := uuid.Parse(cfg.StreamID) + if err != nil { + return nil, err + } + + return &IOChannelConfig{ + Serde: serde, + StreamUUID: streamUUID, + }, nil +} + +// CodeletDescriptorConfig represents the configuration for a codelet descriptor +type CodeletDescriptorConfig struct { + InIOChannel []*IOChannelConfig + OutIOChannel []*IOChannelConfig +} + +func newCodeletDescriptorConfig(cfg *CodeletDescriptorRawConfig) (*CodeletDescriptorConfig, error) { + inIOChannel := make([]*IOChannelConfig, 0, len(cfg.InIOChannel)) + for _, rawIO := range cfg.InIOChannel { + io, err := newIOChannelConfig(rawIO) + if err != nil { + return nil, err + } + inIOChannel = append(inIOChannel, io) + } + + outIOChannel := make([]*IOChannelConfig, 0, len(cfg.OutIOChannel)) + for _, rawIO := range cfg.OutIOChannel { + io, err := newIOChannelConfig(rawIO) + if err != nil { + return nil, err + } + outIOChannel = append(outIOChannel, io) + } + + return 
&CodeletDescriptorConfig{ + InIOChannel: inIOChannel, + OutIOChannel: outIOChannel, + }, nil +} + +// CodeletsetConfig represents the configuration for loading a decoder +type CodeletsetConfig struct { + CodeletDescriptor []*CodeletDescriptorConfig +} + +func newCodeletSetConfig(cfg *CodeletsetRawConfig) (*CodeletsetConfig, error) { + codeletDescriptors := make([]*CodeletDescriptorConfig, 0, len(cfg.CodeletDescriptor)) + for _, rawDesc := range cfg.CodeletDescriptor { + desc, err := newCodeletDescriptorConfig(rawDesc) + if err != nil { + return nil, err + } + codeletDescriptors = append(codeletDescriptors, desc) + } + return &CodeletsetConfig{CodeletDescriptor: codeletDescriptors}, nil +} + +// LoadCompiledProtos loads the compiled protobuf files from the codeletset config +func LoadCompiledProtos(cfgs []*CodeletsetConfig, includeInIO, includeOutIO bool) (map[string]*File, error) { + compiledProtos := make(map[string]*File) + + for _, c := range cfgs { + for _, desc := range c.CodeletDescriptor { + if includeInIO { + for _, io := range desc.InIOChannel { + if _, ok := compiledProtos[io.Serde.Protobuf.PackagePath]; !ok { + protoPkg, err := NewFile(io.Serde.Protobuf.PackagePath) + if err != nil { + return nil, err + } + compiledProtos[io.Serde.Protobuf.PackagePath] = protoPkg + } + } + } + + if includeOutIO { + for _, io := range desc.OutIOChannel { + if _, ok := compiledProtos[io.Serde.Protobuf.PackagePath]; !ok { + protoPkg, err := NewFile(io.Serde.Protobuf.PackagePath) + if err != nil { + return nil, err + } + compiledProtos[io.Serde.Protobuf.PackagePath] = protoPkg + } + } + } + } + } + + return compiledProtos, nil +} + +// CodeletsetConfigFromFiles reads and unmarshals the given files into a slice of CodeletsetConfig +func CodeletsetConfigFromFiles(configs ...string) ([]*CodeletsetConfig, error) { + out := make([]*CodeletsetConfig, 0, len(configs)) + errs := make([]error, 0, len(configs)) + + for _, c := range configs { + rawConfig, err := 
newCodeletsetRawConfig(c) + if err != nil { + errs = append(errs, err) + continue + } + + config, err := newCodeletSetConfig(rawConfig) + if err != nil { + errs = append(errs, fmt.Errorf("failed to unpack file %s: %w", c, err)) + continue + } + + out = append(out, config) + } + + return out, errors.Join(errs...) +} diff --git a/pkg/common/codeletset_raw_config.go b/pkg/common/codeletset_raw_config.go new file mode 100644 index 0000000..8a13823 --- /dev/null +++ b/pkg/common/codeletset_raw_config.go @@ -0,0 +1,49 @@ +package common + +import ( + "fmt" + + "gopkg.in/yaml.v3" +) + +// ProtobufRawConfig represents the configuration for a protobuf message as defined in the yaml config +type ProtobufRawConfig struct { + MsgName string `yaml:"msg_name"` + PackagePath string `yaml:"package_path"` +} + +// SerdeRawConfig represents the configuration for serialize/deserialize as defined in the yaml config +type SerdeRawConfig struct { + Protobuf *ProtobufRawConfig `yaml:"protobuf"` +} + +// IOChannelRawConfig represents the configuration for an IO channel as defined in the yaml config +type IOChannelRawConfig struct { + Serde *SerdeRawConfig `yaml:"serde"` + StreamID string `yaml:"stream_id"` +} + +// CodeletDescriptorRawConfig represents the configuration for a codelet descriptor as defined in the yaml config +type CodeletDescriptorRawConfig struct { + InIOChannel []*IOChannelRawConfig `yaml:"in_io_channel"` + OutIOChannel []*IOChannelRawConfig `yaml:"out_io_channel"` +} + +// CodeletsetRawConfig represents the configuration for loading a decoder as defined in the yaml config +type CodeletsetRawConfig struct { + CodeletDescriptor []*CodeletDescriptorRawConfig `yaml:"codelet_descriptor"` +} + +func newCodeletsetRawConfig(filePath string) (*CodeletsetRawConfig, error) { + f, err := NewFile(filePath) + if err != nil { + return nil, fmt.Errorf("failed to read file %s: %w", filePath, err) + } + + var rawConfig CodeletsetRawConfig + if err := yaml.Unmarshal(f.Data, &rawConfig); 
err != nil {
+		return nil, fmt.Errorf("failed to unmarshal file %s: %w", filePath, err)
+	}
+
+	return &rawConfig, nil
+}
diff --git a/pkg/common/file.go b/pkg/common/file.go
new file mode 100644
index 0000000..05e9374
--- /dev/null
+++ b/pkg/common/file.go
@@ -0,0 +1,95 @@
+package common
+
+import (
+	"fmt"
+	"io/fs"
+	"os"
+	"path/filepath"
+
+	"github.com/sirupsen/logrus"
+)
+
+// File represents a generated file
+type File struct {
+	Data []byte
+	Mode fs.FileMode
+	Name string
+}
+
+// NewFile creates a new file from a file path
+func NewFile(filePath string) (*File, error) {
+	filePath, err := filepath.Abs(filePath)
+	if err != nil {
+		return nil, err
+	}
+
+	fi, err := os.Stat(filePath)
+	if err != nil {
+		return nil, err
+	} else if fi.IsDir() {
+		return nil, fmt.Errorf(`expected "%s" to be a file, got a directory`, filePath)
+	}
+
+	content, err := os.ReadFile(filePath)
+	if err != nil {
+		return nil, err
+	}
+
+	return &File{
+		Data: content,
+		Mode: fi.Mode(),
+		Name: filepath.Base(filePath),
+	}, nil
+}
+
+// WriteFilesToDirectory writes files to a directory
+func WriteFilesToDirectory(logger *logrus.Logger, outputDirectory string, files []*File) error {
+	for _, f := range files {
+		if err := WriteFileToDirectory(logger, outputDirectory, f); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// WriteFileToDirectory writes a file to a directory, replacing any existing file
+func WriteFileToDirectory(logger *logrus.Logger, outputDirectory string, file *File) error {
+	filePath := filepath.Join(outputDirectory, file.Name)
+
+	l := logger.WithField("filename", filePath)
+	fi, err := os.Stat(filePath)
+	var f *os.File
+	if err == nil && fi.IsDir() {
+		return fmt.Errorf(`"%s" is a directory`, filePath)
+	} else if err == nil {
+		l.Debug("Overwriting existing file")
+		if err := os.Remove(filePath); err != nil {
+			return err
+		}
+	} else if !os.IsNotExist(err) {
+		return err
+	}
+	f, err = os.Create(filePath)
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		if err 
:= f.Close(); err != nil {
+			l.WithError(err).Error("failed to close file")
+		}
+	}()
+
+	n, err := f.Write(file.Data)
+	if err != nil {
+		return err
+	} else if n != len(file.Data) {
+		return fmt.Errorf("expected to write %d bytes, wrote %d", len(file.Data), n)
+	}
+
+	if err := os.Chmod(filePath, file.Mode); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/pkg/common/options.go b/pkg/common/options.go
new file mode 100644
index 0000000..2968e41
--- /dev/null
+++ b/pkg/common/options.go
@@ -0,0 +1,96 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+
+package common
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+
+	"github.com/sirupsen/logrus"
+	"github.com/spf13/pflag"
+)
+
+// NewGeneralOptions creates a new GeneralOptions with default values
+func NewGeneralOptions(flags *pflag.FlagSet) *GeneralOptions {
+	opts := &GeneralOptions{}
+	opts.addToFlags(flags)
+	return opts
+}
+
+// NewGeneralOptionsFromLogger creates a new GeneralOptions from a logger
+func NewGeneralOptionsFromLogger(logger *logrus.Logger) *GeneralOptions {
+	opts := &GeneralOptions{
+		file:         "",
+		formatter:    "TextFormatter",
+		Logger:       logger,
+		logLevel:     logger.Level.String(),
+		reportCaller: logger.ReportCaller,
+	}
+	return opts
+}
+
+// GeneralOptions contains the general options for the jbpf cli
+type GeneralOptions struct {
+	file         string
+	formatter    string
+	logLevel     string
+	reportCaller bool
+
+	Logger *logrus.Logger
+}
+
+func (opts *GeneralOptions) addToFlags(flags *pflag.FlagSet) {
+	flags.BoolVar(&opts.reportCaller, "log-report-caller", false, "show report caller in logs")
+	flags.StringVar(&opts.file, "log-file", "", "if set, will write logs to file as well as terminal")
+	flags.StringVar(&opts.formatter, "log-formatter", "TextFormatter", "logger formatter, set to UncoloredTextFormatter, JSONFormatter or TextFormatter")
+	flags.StringVar(&opts.logLevel, "log-level", "info", "log level, set to: panic, fatal, error, warn, info, debug or trace")
+}
+ +// Parse will process and validate args +func (opts *GeneralOptions) Parse() error { + var err1, err2 error + opts.Logger, err1 = opts.getLogger() + return errors.Join(err1, err2) +} + +// GetLogger returns a logger based on the options +func (opts *GeneralOptions) getLogger() (*logrus.Logger, error) { + logLev, err := logrus.ParseLevel(opts.logLevel) + if err != nil { + return nil, err + } + + var formatter logrus.Formatter + switch strings.ToLower(opts.formatter) { + case "uncoloredtextformatter": + formatter = new(UncoloredTextFormatter) + case "jsonformatter": + formatter = new(logrus.JSONFormatter) + case "textformatter": + formatter = new(logrus.TextFormatter) + default: + return nil, fmt.Errorf("invalid log formatter: %v", opts.formatter) + } + + var out io.Writer = os.Stderr + + if opts.file != "" { + file, err := os.OpenFile(opts.file, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) + if err != nil { + return nil, err + } + out = io.MultiWriter(os.Stderr, file) + } + + return &logrus.Logger{ + Out: out, + Formatter: formatter, + Hooks: make(logrus.LevelHooks), + Level: logLev, + ExitFunc: os.Exit, + ReportCaller: opts.reportCaller, + }, nil +} diff --git a/pkg/common/subproc.go b/pkg/common/subproc.go new file mode 100644 index 0000000..4252f95 --- /dev/null +++ b/pkg/common/subproc.go @@ -0,0 +1,30 @@ +package common + +import ( + "context" + "errors" + "fmt" + "os" + "os/exec" + "strings" + + "github.com/sirupsen/logrus" +) + +// RunSubprocess runs a subprocess +func RunSubprocess(ctx context.Context, logger *logrus.Logger, name string, args ...string) error { + l := logger.WithFields(logrus.Fields{ + "cmd": strings.Join(append([]string{name}, args...), " "), + }) + l.Debug("Creating subprocess") + cmd := exec.CommandContext(ctx, name, args...) 
+ cmd.Env = os.Environ() + l.Debug("Running subprocess") + cmd.Stderr = logger.WithField("channel", "stderr").WriterLevel(logrus.DebugLevel) + cmd.Stdout = logger.WithField("channel", "stdout").WriterLevel(logrus.DebugLevel) + if err := cmd.Run(); err != nil { + return errors.Join(err, fmt.Errorf("failed to run command")) + } + l.Debug("Complete subprocess") + return nil +} diff --git a/pkg/common/uncolored_text_formatter.go b/pkg/common/uncolored_text_formatter.go new file mode 100644 index 0000000..9dbf8b9 --- /dev/null +++ b/pkg/common/uncolored_text_formatter.go @@ -0,0 +1,266 @@ +package common + +import ( + "bytes" + "fmt" + "runtime" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/sirupsen/logrus" +) + +const ( + defaultTimestampFormat = time.RFC3339 +) + +var ( + baseTimestamp time.Time + levelTextMaxLength int +) + +func init() { + baseTimestamp = time.Now() + + for _, level := range logrus.AllLevels { + levelTextLength := utf8.RuneCount([]byte(level.String())) + if levelTextLength > levelTextMaxLength { + levelTextMaxLength = levelTextLength + } + } +} + +type fieldKey string + +// FieldMap allows customization of the key names for default fields. +type FieldMap map[fieldKey]string + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +// UncoloredTextFormatter formats logs into text +type UncoloredTextFormatter struct { + // Force quoting of all values + ForceQuote bool + + // DisableQuote disables quoting for all values. + // DisableQuote will have a lower priority than ForceQuote. + // If both of them are set to true, quote will be forced on all values. + DisableQuote bool + + // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/ + EnvironmentOverrideColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. 
+ DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed. + // The format to use is the same than for time.Format or time.Parse from the standard + // library. + // The standard Library already provides a set of predefined format. + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. + DisableSorting bool + + // The keys sorting function, when uninitialized it uses sort.Strings. + SortingFunc func([]string) + + // Disables the truncation of the level text to 4 characters. + DisableLevelTruncation bool + + // PadLevelText Adds padding the level text so that all the levels output at the same length + // PadLevelText is a superset of the DisableLevelTruncation option + PadLevelText bool + + // QuoteEmptyFields will wrap empty fields in quotes if true + QuoteEmptyFields bool + + // FieldMap allows users to customize the names of keys for default fields. + // As an example: + // formatter := &UncoloredTextFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message"}} + FieldMap FieldMap + + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from fields. 
+ CallerPrettyfier func(*runtime.Frame) (function string, file string) +} + +// Format renders a single log entry +func (f *UncoloredTextFormatter) Format(entry *logrus.Entry) ([]byte, error) { + data := make(logrus.Fields) + for k, v := range entry.Data { + data[k] = v + } + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + keys := make([]string, 0, len(data)) + for k := range data { + keys = append(keys, k) + } + + if !f.DisableSorting { + if f.SortingFunc == nil { + sort.Strings(keys) + } + } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + f.printToBuf(b, entry, keys, data, timestampFormat) + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *UncoloredTextFormatter) printToBuf(b *bytes.Buffer, entry *logrus.Entry, keys []string, data logrus.Fields, timestampFormat string) { + levelText := strings.ToUpper(entry.Level.String()) + if !f.DisableLevelTruncation && !f.PadLevelText { + levelText = levelText[0:4] + } + if f.PadLevelText { + // Generates the format string used in the next line, for example "%-6s" or "%-7s". + // Based on the max level text length. 
+ formatString := "%-" + strconv.Itoa(levelTextMaxLength) + "s" + // Formats the level text by appending spaces up to the max length, for example: + // - "INFO " + // - "WARNING" + levelText = fmt.Sprintf(formatString, levelText) + } + + // Remove a single newline if it already exists in the message to keep + // the behavior of logrus text_formatter the same as the stdlib log package + entry.Message = strings.TrimSuffix(entry.Message, "\n") + + caller := "" + if entry.HasCaller() { + funcVal := fmt.Sprintf("%s()", entry.Caller.Function) + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + + if fileVal == "" { + caller = funcVal + } else if funcVal == "" { + caller = fileVal + } else { + caller = fileVal + " " + funcVal + } + } + + switch { + case f.DisableTimestamp: + fmt.Fprintf(b, "%s%s %-44s ", levelText, caller, entry.Message) + case !f.FullTimestamp: + fmt.Fprintf(b, "%s[%04d]%s %-44s ", levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) + default: + fmt.Fprintf(b, "%s[%s]%s %-44s ", levelText, entry.Time.Format(timestampFormat), caller, entry.Message) + } + for _, k := range keys { + v := data[k] + fmt.Fprintf(b, " %s=", k) + f.appendValue(b, v) + } +} + +func (f *UncoloredTextFormatter) needsQuoting(text string) bool { + if f.ForceQuote { + return true + } + if f.QuoteEmptyFields && len(text) == 0 { + return true + } + if f.DisableQuote { + return false + } + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { + return true + } + } + return false +} + +func (f *UncoloredTextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + stringVal, ok := value.(string) + if !ok { + stringVal = fmt.Sprint(value) + } + + if !f.needsQuoting(stringVal) { + b.WriteString(stringVal) + } else { + b.WriteString(fmt.Sprintf("%q", stringVal)) + } +} + +func prefixFieldClashes(data logrus.Fields, fieldMap FieldMap, reportCaller bool) { + timeKey := fieldMap.resolve(logrus.FieldKeyTime) + if t, ok := data[timeKey]; ok { + data["fields."+timeKey] = t + delete(data, timeKey) + } + + msgKey := fieldMap.resolve(logrus.FieldKeyMsg) + if m, ok := data[msgKey]; ok { + data["fields."+msgKey] = m + delete(data, msgKey) + } + + levelKey := fieldMap.resolve(logrus.FieldKeyLevel) + if l, ok := data[levelKey]; ok { + data["fields."+levelKey] = l + delete(data, levelKey) + } + + logrusErrKey := fieldMap.resolve(logrus.FieldKeyLogrusError) + if l, ok := data[logrusErrKey]; ok { + data["fields."+logrusErrKey] = l + delete(data, logrusErrKey) + } + + // If reportCaller is not set, 'func' will not conflict. + if reportCaller { + funcKey := fieldMap.resolve(logrus.FieldKeyFunc) + if l, ok := data[funcKey]; ok { + data["fields."+funcKey] = l + } + fileKey := fieldMap.resolve(logrus.FieldKeyFile) + if l, ok := data[fileKey]; ok { + data["fields."+fileKey] = l + } + } +} diff --git a/pkg/data/server.go b/pkg/data/server.go new file mode 100644 index 0000000..ec8f845 --- /dev/null +++ b/pkg/data/server.go @@ -0,0 +1,115 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+ +package data + +import ( + context "context" + "errors" + "fmt" + "jbpf_protobuf_cli/schema" + "net" + "os" + "os/signal" + "syscall" + "time" + + "github.com/google/uuid" + "github.com/sirupsen/logrus" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +const ( + dataReadDeadline = 1 * time.Second + decoderChanSize = 100 +) + +// Server is a server that implements the DynamicDecoderServer interface +type Server struct { + ctx context.Context + logger *logrus.Logger + opts *ServerOptions + store *schema.Store +} + +// NewServer returns a new Server +func NewServer(ctx context.Context, logger *logrus.Logger, opts *ServerOptions, store *schema.Store) (*Server, error) { + return &Server{ + ctx: ctx, + logger: logger, + opts: opts, + store: store, + }, nil +} + +// Listen starts the server +func (s *Server) Listen(onData func(uuid.UUID, []byte)) error { + data, err := net.ListenPacket(dataScheme, fmt.Sprintf("%s:%d", s.opts.dataIP, s.opts.dataPort)) + if err != nil { + return err + } + s.logger.WithField("addr", data.LocalAddr().Network()+"://"+data.LocalAddr().String()).Debug("starting data server") + defer func() { + s.logger.WithField("addr", data.LocalAddr().Network()+"://"+data.LocalAddr().String()).Debug("stopping data server") + if err := data.Close(); err != nil { + s.logger.WithError(err).Errorf("error closing data server") + } + }() + + stopper := make(chan os.Signal, 1) + signal.Notify(stopper, os.Interrupt, syscall.SIGTERM, syscall.SIGINT) + + for { + select { + case <-stopper: + return nil + case <-s.ctx.Done(): + return nil + + default: + buffer := make([]byte, s.opts.dataBufferSize) + if err := data.SetReadDeadline(time.Now().Add(dataReadDeadline)); err != nil { + return err + } + n, _, err := data.ReadFrom(buffer) + if netErr, ok := err.(net.Error); ok && netErr.Timeout() { + continue + } + if err != nil { + return errors.Join(err, fmt.Errorf("error reading from UDP socket")) + } + + if n < 16 { + 
s.logger.Warnf("received data is less than %d bytes, skipping", 16) + continue + } + + streamUUID, err := uuid.FromBytes(buffer[:16]) + if err != nil { + s.logger.WithError(err).Error("error parsing stream UUID") + continue + } + + msg, err := s.store.GetProtoMsgInstance(streamUUID) + if err != nil { + s.logger.WithError(err).Error("error creating instance of proto message") + continue + } + + err = proto.Unmarshal(buffer[16:n], msg) + if err != nil { + s.logger.WithError(err).Error("error unmarshalling payload") + continue + } + + res, err := protojson.Marshal(msg) + if err != nil { + s.logger.WithError(err).Error("error marshalling message to JSON") + continue + } + + onData(streamUUID, res) + } + } + +} diff --git a/pkg/data/server_options.go b/pkg/data/server_options.go new file mode 100644 index 0000000..e129245 --- /dev/null +++ b/pkg/data/server_options.go @@ -0,0 +1,46 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. + +package data + +import ( + "fmt" + "net/url" + + "github.com/spf13/pflag" +) + +const ( + dataPrefix = "decoder-data" + dataScheme = "udp" + defaultDataBufferSize = 1<<16 - 1 + defaultDataIP = "" + defaultDataPort = uint16(20788) +) + +// ServerOptions is the options for the decoder server +type ServerOptions struct { + dataBufferSize uint16 + dataIP string + dataPort uint16 +} + +// AddServerOptionsToFlags adds the server options to the flags +func AddServerOptionsToFlags(flags *pflag.FlagSet, opts *ServerOptions) { + if opts == nil { + return + } + + flags.StringVar(&opts.dataIP, dataPrefix+"-ip", defaultDataIP, "IP address of the data UDP server") + flags.Uint16Var(&opts.dataBufferSize, dataPrefix+"-buffer", defaultDataBufferSize, "buffer size for the data UDP server") + flags.Uint16Var(&opts.dataPort, dataPrefix+"-port", defaultDataPort, "port address of the data UDP server") +} + +// Parse parses the server options +func (o *ServerOptions) Parse() error { + _, err := url.ParseRequestURI(fmt.Sprintf("%s://%s:%d", 
dataScheme, o.dataIP, o.dataPort)) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/generator/nanopb/files.go b/pkg/generator/nanopb/files.go new file mode 100644 index 0000000..0f7e6d6 --- /dev/null +++ b/pkg/generator/nanopb/files.go @@ -0,0 +1,54 @@ +package nanopb + +import ( + "errors" + "jbpf_protobuf_cli/common" + "path/filepath" + "strings" + + "github.com/sirupsen/logrus" +) + +const ( + optionsGlob = "*.options" + protosGlob = "*.proto" +) + +// FindFiles finds nanopb files (*.proto, *.options) in a directory +func FindFiles(logger *logrus.Logger, workingDir string) ([]*common.File, error) { + optionsFiles, err1 := filepath.Glob(filepath.Join(workingDir, optionsGlob)) + protoFiles, err2 := filepath.Glob(filepath.Join(workingDir, protosGlob)) + if err := errors.Join(err1, err2); err != nil { + return nil, err + } + + files := make([]*common.File, 0, len(optionsFiles)+len(protoFiles)) + fileNames := make([]string, 0, len(optionsFiles)+len(protoFiles)) + + for _, f := range optionsFiles { + file, err := common.NewFile(f) + if err != nil { + return nil, err + } + files = append(files, file) + fileNames = append(fileNames, filepath.Base(f)) + } + for _, f := range protoFiles { + file, err := common.NewFile(f) + if err != nil { + return nil, err + } + files = append(files, file) + fileNames = append(fileNames, filepath.Base(f)) + } + + if len(files) == 0 { + return nil, errors.New("no nanopb files found") + } else if len(protoFiles) == 0 { + return nil, errors.New("no proto file found") + } + + logger.WithField("files", strings.Join(fileNames, ", ")).Debug("found nanopb files") + + return files, nil +} diff --git a/pkg/generator/nanopb/nanopb.go b/pkg/generator/nanopb/nanopb.go new file mode 100644 index 0000000..33790f3 --- /dev/null +++ b/pkg/generator/nanopb/nanopb.go @@ -0,0 +1,54 @@ +package nanopb + +import ( + "fmt" + "log" + "os" +) + +const ( + nanoPbEnvVar = "NANO_PB" +) + +var ( + // GeneratorPath is 
$NANO_PB/generator/nanopb_generator + GeneratorPath string + // ProtocPath is $NANO_PB/generator/protoc + ProtocPath string + // Path is $NANO_PB + Path string + // PbCommonCPath is $NANO_PB/pb_common.c + PbCommonCPath string + // PbDecodeCPath is $NANO_PB/pb_decode.c + PbDecodeCPath string + // PbEncodeCPath is $NANO_PB/pb_encode.c + PbEncodeCPath string +) + +func init() { + Path = os.Getenv(nanoPbEnvVar) + + if err := validateDirPath(Path); err != nil { + log.Fatal(err) + } + + ProtocPath = fmt.Sprintf("%s/generator/protoc", Path) + GeneratorPath = fmt.Sprintf("%s/generator/nanopb_generator", Path) + PbCommonCPath = fmt.Sprintf("%s/pb_common.c", Path) + PbDecodeCPath = fmt.Sprintf("%s/pb_decode.c", Path) + PbEncodeCPath = fmt.Sprintf("%s/pb_encode.c", Path) +} + +func validateDirPath(path string) error { + if path == "" { + return nil + } + fi, err := os.Stat(path) + if err != nil { + return err + } + if !fi.IsDir() { + return fmt.Errorf(`Expected "%s" to be a directory`, path) + } + return nil +} diff --git a/pkg/generator/schema/schema.go b/pkg/generator/schema/schema.go new file mode 100644 index 0000000..4d73da7 --- /dev/null +++ b/pkg/generator/schema/schema.go @@ -0,0 +1,112 @@ +package schema + +import ( + "context" + "errors" + "fmt" + "jbpf_protobuf_cli/common" + "jbpf_protobuf_cli/generator/nanopb" + "jbpf_protobuf_cli/generator/stream" + "os" + + "github.com/sirupsen/logrus" +) + +const ( + pbTemplate = "%s.pb" +) + +var ( + generatedFileTemplate = []string{pbTemplate, "%s.pb.c", "%s.pb.h"} +) + +// Config for schema file generation +type Config struct { + Files []*common.File + ProtoMessageNames []string + ProtoPackageName string +} + +// Generate generates files for schema inside a temporary directory +func Generate(ctx context.Context, logger *logrus.Logger, cfg *Config) ([]*common.File, error) { + wd, err := os.MkdirTemp("", "temp*") + if err != nil { + return nil, err + } + defer func() { + if err = os.RemoveAll(wd); err != nil { + 
logger.WithField("directory", wd).WithError(err).Error("failed to remove working directory")
+		}
+	}()
+
+	originalWd, err := os.Getwd()
+	if err != nil {
+		return nil, err
+	}
+	if err := os.Chdir(wd); err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err := os.Chdir(originalWd); err != nil {
+			logger.WithField("directory", originalWd).WithError(err).Error("failed to change working directory")
+		}
+	}()
+
+	for _, fileDetails := range cfg.Files {
+		logger.Debug("Writing file: ", fileDetails.Name)
+		f, err := os.Create(fileDetails.Name)
+		if err != nil {
+			return nil, err
+		}
+		n, err := f.Write(fileDetails.Data)
+		if n != len(fileDetails.Data) {
+			err = errors.Join(err, fmt.Errorf("expected to write %d bytes, wrote %d", len(fileDetails.Data), n))
+		}
+		if err != nil {
+			return nil, errors.Join(err, f.Close())
+		}
+		if err = f.Close(); err != nil {
+			return nil, err
+		}
+		logger.Debug("Closed file: ", fileDetails.Name)
+	}
+
+	if err := errors.Join(
+		common.RunSubprocess(
+			ctx,
+			logger,
+			nanopb.GeneratorPath,
+			cfg.ProtoPackageName+".proto",
+		),
+		common.RunSubprocess(
+			ctx,
+			logger,
+			nanopb.ProtocPath,
+			cfg.ProtoPackageName+".proto",
+			"-o",
+			fmt.Sprintf(pbTemplate, cfg.ProtoPackageName),
+		)); err != nil {
+		return nil, err
+	}
+
+	generatedFiles := make([]*common.File, 0, len(cfg.ProtoMessageNames)*2+3)
+
+	for _, fTemplate := range generatedFileTemplate {
+		f := fmt.Sprintf(fTemplate, cfg.ProtoPackageName)
+		fileData, err := common.NewFile(f)
+		if err != nil {
+			return nil, err
+		}
+		generatedFiles = append(generatedFiles, fileData)
+	}
+
+	for _, protoMessageName := range cfg.ProtoMessageNames {
+		files, err := stream.Generate(ctx, logger, cfg.ProtoPackageName, protoMessageName)
+		if err != nil {
+			return nil, err
+		}
+		generatedFiles = append(generatedFiles, files...)
+ } + + return generatedFiles, nil +} diff --git a/pkg/generator/stream/_serializer.c.tpl b/pkg/generator/stream/_serializer.c.tpl new file mode 100644 index 0000000..b5c6183 --- /dev/null +++ b/pkg/generator/stream/_serializer.c.tpl @@ -0,0 +1,26 @@ +#define PB_FIELD_32BIT 1 +#include +#include +#include +#include "{{ .ProtoPackageName }}.pb.h" + +const uint32_t proto_message_size = sizeof({{ .ProtoMessageName }}); + +int jbpf_io_serialize(void* input_msg_buf, size_t input_msg_buf_size, char* serialized_data_buf, size_t serialized_data_buf_size) { + if (input_msg_buf_size != proto_message_size) + return -1; + + pb_ostream_t ostream = pb_ostream_from_buffer((uint8_t*)serialized_data_buf, serialized_data_buf_size); + if (!pb_encode(&ostream, {{ .ProtoMessageName }}_fields, input_msg_buf)) + return -1; + + return ostream.bytes_written; +} + +int jbpf_io_deserialize(char* serialized_data_buf, size_t serialized_data_buf_size, void* output_msg_buf, size_t output_msg_buf_size) { + if (output_msg_buf_size != proto_message_size) + return 0; + + pb_istream_t istream = pb_istream_from_buffer((uint8_t*)serialized_data_buf, serialized_data_buf_size); + return pb_decode(&istream, {{ .ProtoMessageName }}_fields, output_msg_buf); +} diff --git a/pkg/generator/stream/stream.go b/pkg/generator/stream/stream.go new file mode 100644 index 0000000..c026b12 --- /dev/null +++ b/pkg/generator/stream/stream.go @@ -0,0 +1,88 @@ +package stream + +import ( + "context" + "errors" + "fmt" + "jbpf_protobuf_cli/common" + "jbpf_protobuf_cli/generator/nanopb" + "os" + "text/template" + + "github.com/sirupsen/logrus" +) + +const ( + defaultPbField32Bit = "1" + envVarPbField32Bit = "PB_FIELD_32BIT" + envVarPbMaxRequiredFields = "PB_MAX_REQUIRED_FIELDS" + serializerC = "%s:%s_serializer.c" + serializerSO = "%s:%s_serializer.so" +) + +func createNewFileWithTmpl(logger *logrus.Logger, filename string, tmpl *template.Template, data SerializerTemplateData) error { + l := logger.WithField("filename", 
filename) + + f, err := os.Create(filename) + if err != nil { + return err + } + defer func() { + if err := f.Close(); err != nil { + l.WithError(err).Error("Failed to close file") + } + }() + l.Debug("Created file") + err = tmpl.Execute(f, data) + if err != nil { + l.WithError(err).Error("Failed to write to file") + return err + } + l.Debug("Successfully written to file") + return nil +} + +// Generate creates files for a stream +func Generate(ctx context.Context, logger *logrus.Logger, protoPackageName, protoMessageName string) ([]*common.File, error) { + cFile := fmt.Sprintf(serializerC, protoPackageName, protoMessageName) + soFile := fmt.Sprintf(serializerSO, protoPackageName, protoMessageName) + + if err := createNewFileWithTmpl(logger, + cFile, + serializerTemplate, + SerializerTemplateData{ProtoPackageName: protoPackageName, ProtoMessageName: protoMessageName}, + ); err != nil { + return nil, err + } + + pbField32Bit := os.Getenv(envVarPbField32Bit) + if pbField32Bit == "" { + pbField32Bit = defaultPbField32Bit + } + args := []string{ + "-I", + nanopb.Path, + cFile, + protoPackageName + ".pb.c", + nanopb.PbCommonCPath, + nanopb.PbDecodeCPath, + nanopb.PbEncodeCPath, + "-DPB_FIELD_32BIT=" + pbField32Bit, + } + if pbMaxRequiredFields := os.Getenv(envVarPbMaxRequiredFields); len(pbMaxRequiredFields) > 0 { + args = append(args, "-DPB_MAX_REQUIRED_FIELDS="+pbMaxRequiredFields) + } + args = append(args, "-shared", "-fPIC", "-o", soFile) + + if err := common.RunSubprocess(ctx, logger, "cc", args...); err != nil { + return nil, err + } + + cFileData, err1 := common.NewFile(cFile) + soFileData, err2 := common.NewFile(soFile) + if err := errors.Join(err1, err2); err != nil { + return nil, err + } + + return []*common.File{cFileData, soFileData}, nil +} diff --git a/pkg/generator/stream/templates.go b/pkg/generator/stream/templates.go new file mode 100644 index 0000000..982e763 --- /dev/null +++ b/pkg/generator/stream/templates.go @@ -0,0 +1,28 @@ +package stream + 
+import ( + _ "embed" + "log" + "text/template" +) + +//go:embed _serializer.c.tpl +var tpl string + +var serializerTemplate *template.Template + +// SerializerTemplateData is the data passed to the serializer template +type SerializerTemplateData struct { + ProtoMessageName string + ProtoPackageName string +} + +func init() { + var err error + + serializerTemplate, err = template.New("serializerTemplate").Funcs(nil).Parse(tpl) + + if err != nil { + log.Fatal(err) + } +} diff --git a/pkg/go.mod b/pkg/go.mod new file mode 100644 index 0000000..a2ccced --- /dev/null +++ b/pkg/go.mod @@ -0,0 +1,21 @@ +module jbpf_protobuf_cli + +go 1.23.2 + +require ( + github.com/google/uuid v1.6.0 + github.com/sirupsen/logrus v1.9.3 + github.com/spf13/cobra v1.8.1 + github.com/spf13/pflag v1.0.5 + github.com/stretchr/testify v1.9.0 + golang.org/x/sync v0.8.0 + google.golang.org/protobuf v1.35.1 + gopkg.in/yaml.v3 v3.0.1 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect +) diff --git a/pkg/go.sum b/pkg/go.sum new file mode 100644 index 0000000..9452956 --- /dev/null +++ b/pkg/go.sum @@ -0,0 +1,36 @@ +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= 
+google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/pkg/jbpf/client.go b/pkg/jbpf/client.go
new file mode 100644
index 0000000..a9f2f94
--- /dev/null
+++ b/pkg/jbpf/client.go
@@ -0,0 +1,91 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+
+package jbpf
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+	"net"
+
+	"github.com/sirupsen/logrus"
+)
+
+// Client is a TCP socket client that sends length-prefixed frames to a
+// jbpf instance. It lazily reconnects on Write after a network failure.
+type Client struct {
+	conn   *net.TCPConn
+	logger *logrus.Logger
+	opts   *Options
+}
+
+// NewClient creates a new socket client and eagerly establishes the
+// TCP connection; a connection error is returned to the caller.
+func NewClient(logger *logrus.Logger, opts *Options) (*Client, error) {
+	c := &Client{
+		logger: logger,
+		opts:   opts,
+	}
+	if err := c.connect(); err != nil {
+		return nil, err
+	}
+	return c, nil
+}
+
+// connect dials the configured endpoint and applies keep-alive settings
+// when a non-zero keep-alive period was requested.
+func (c *Client) connect() error {
+	conn, err := net.Dial(scheme, fmt.Sprintf("%s:%d", c.opts.ip, c.opts.port))
+	if err != nil {
+		return err
+	}
+
+	tcpc, ok := conn.(*net.TCPConn)
+	if !ok {
+		return fmt.Errorf("expected a tcp connection")
+	}
+
+	if c.opts.keepAlivePeriod != 0 {
+		if err := tcpc.SetKeepAlive(true); err != nil {
+			return err
+		}
+		if err := tcpc.SetKeepAlivePeriod(c.opts.keepAlivePeriod); err != nil {
+			return err
+		}
+	}
+
+	c.conn = tcpc
+	return nil
+}
+
+// Write sends bs as one frame: a 2-byte little-endian length prefix
+// followed by the payload. Payloads larger than 65535 bytes are rejected
+// explicitly; uint16(len(bs)) would otherwise silently truncate the
+// length field and corrupt the stream framing.
+func (c *Client) Write(bs []byte) error {
+	if len(bs) > math.MaxUint16 {
+		return fmt.Errorf("payload of %d bytes exceeds maximum frame size %d", len(bs), math.MaxUint16)
+	}
+
+	if c.conn == nil {
+		if err := c.connect(); err != nil {
+			return err
+		}
+	}
+
+	lengthField := make([]byte, 2)
+	binary.LittleEndian.PutUint16(lengthField, uint16(len(bs)))
+
+	if _, err := c.conn.Write(append(lengthField, bs...)); err != nil {
+		// On a network-level error, drop the connection so the next
+		// Write reconnects from scratch.
+		var netErr net.Error
+		if errors.As(err, &netErr) {
+			if err := c.Close(); err != nil {
+				c.logger.WithError(err).Error("failed to close connection")
+			}
+			c.conn = nil
+			return fmt.Errorf("closing connection: %w", netErr)
+		}
+		return err
+	}
+
+	return nil
+}
+
+// Close closes the connection; it is a no-op when already closed.
+func (c *Client) Close() error {
+	if c.conn == nil {
+		return nil
+	}
+	err := c.conn.Close()
+	c.conn = nil
+	return err
+}
diff --git a/pkg/jbpf/options.go b/pkg/jbpf/options.go
new file mode 100644
index 0000000..5a981f9
--- /dev/null
+++ b/pkg/jbpf/options.go
@@ -0,0 +1,46 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+
+package jbpf
+
+import (
+	"fmt"
+	"net/url"
+	"time"
+
+	"github.com/spf13/pflag"
+)
+
+const (
+	defaultIP     = "0.0.0.0"
+	defaultPort   = uint16(20787)
+	optionsPrefix = "jbpf"
+	scheme        = "tcp"
+)
+
+// Options is the options for the jbpf client
+type Options struct {
+	ip              string
+	keepAlivePeriod time.Duration
+	port            uint16
+}
+
+// AddOptionsToFlags adds the options to the flags
+func AddOptionsToFlags(flags *pflag.FlagSet, opts *Options) {
+	if opts == nil {
+		return
+	}
+
+	flags.DurationVar(&opts.keepAlivePeriod, optionsPrefix+"-keep-alive", 0, "time to keep alive the connection")
+	flags.StringVar(&opts.ip, optionsPrefix+"-ip", defaultIP, "IP address of the jbpf TCP server")
+	flags.Uint16Var(&opts.port, optionsPrefix+"-port", defaultPort, "port address of the jbpf TCP server")
+}
+
+// Parse validates that the configured ip/port form a well-formed URI.
+func (o *Options) Parse() error {
+	_, err := url.ParseRequestURI(fmt.Sprintf("%s://%s:%d", scheme, o.ip, o.port))
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/pkg/main.go b/pkg/main.go
new file mode 100644
index 0000000..1314376
--- /dev/null
+++ b/pkg/main.go
@@ -0,0 +1,34 @@
+package main
+
+import (
+	"context"
+	"jbpf_protobuf_cli/cmd/decoder"
+	"jbpf_protobuf_cli/cmd/input"
+	"jbpf_protobuf_cli/cmd/serde"
+
"jbpf_protobuf_cli/common" + "os" + + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" +) + +func main() { + ctx := context.Background() + if err := cli().ExecuteContext(ctx); err != nil { + logrus.WithError(err).Fatal("Exiting") + } +} + +func cli() *cobra.Command { + cmd := &cobra.Command{ + Use: os.Args[0], + Long: "jbpf companion command line tool to generate protobuf assets. Includes a decoder to receive output data over a UDP socket from a jbpf instance. Messages are then decoded and print as json. Also provides a mechanism to dispatch input control messages to a jbpf instance via a TCP socket.", + } + opts := common.NewGeneralOptions(cmd.PersistentFlags()) + cmd.AddCommand( + decoder.Command(opts), + input.Command(opts), + serde.Command(opts), + ) + return cmd +} diff --git a/pkg/schema/client.go b/pkg/schema/client.go new file mode 100644 index 0000000..be87c72 --- /dev/null +++ b/pkg/schema/client.go @@ -0,0 +1,176 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+ +package schema + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/google/uuid" + "github.com/sirupsen/logrus" +) + +// Client encapsulates the decoder client +type Client struct { + baseURL string + ctx context.Context + inner *http.Client + logger *logrus.Logger +} + +// NewClient creates a new Client +func NewClient(ctx context.Context, logger *logrus.Logger, opts *Options) (*Client, error) { + ip := opts.ip + if len(ip) == 0 { + ip = "localhost" + } + + return &Client{ + baseURL: fmt.Sprintf("%s://%s:%d", controlScheme, ip, opts.port), + ctx: ctx, + inner: &http.Client{}, + logger: logger, + }, nil +} + +func (c *Client) doPost(relativePath string, input interface{}) error { + jsonData, err := json.Marshal(input) + if err != nil { + return err + } + + var req *http.Request + req, err = http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s", c.baseURL, relativePath), bytes.NewReader(jsonData)) + if err != nil { + return err + } + + resp, err := c.inner.Do(req) + if err != nil { + c.logger.WithError(err).Error("http request failed") + return err + } + + buf := new(strings.Builder) + _, err = io.Copy(buf, resp.Body) + if err != nil { + return err + } + + if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices { + err := fmt.Errorf("unexpected status code: %d", resp.StatusCode) + c.logger.WithField("body", buf.String()).WithError(err).Error("unexpected status code") + return err + } + + return nil +} + +func (c *Client) doDelete(relativePath string) error { + var req *http.Request + var err error + req, err = http.NewRequest(http.MethodDelete, fmt.Sprintf("%s%s", c.baseURL, relativePath), nil) + if err != nil { + return err + } + + resp, err := c.inner.Do(req) + if err != nil { + c.logger.WithError(err).Error("http request failed") + return err + } + + buf := new(strings.Builder) + _, err = io.Copy(buf, resp.Body) + if err != nil { + return 
err + } + + if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices { + err := fmt.Errorf("unexpected status code: %d", resp.StatusCode) + c.logger.WithField("body", buf.String()).WithError(err).Error("unexpected status code") + return err + } + + return nil +} + +// LoadRequest is a request to load a schema and stream +type LoadRequest struct { + CompiledProto []byte + Streams map[uuid.UUID]string +} + +// Load loads the schemas into the decoder +func (c *Client) Load(schemas map[string]*LoadRequest) error { + errs := make([]error, 0, len(schemas)) + + for protoPackageName, req := range schemas { + l := c.logger.WithFields(logrus.Fields{"pkg": protoPackageName}) + + if err := c.doPost("/schema", &UpsertSchemaRequest{ProtoDescriptor: req.CompiledProto}); err != nil { + err = fmt.Errorf("failed to upsert proto package %s: %w", protoPackageName, err) + errs = append(errs, err) + continue + } + + l.Info("successfully upserted proto package") + + for streamUUID, protoMsg := range req.Streams { + err := c.doPost("/stream", &AddSchemaAssociationRequest{StreamUUID: streamUUID, ProtoPackage: protoPackageName, ProtoMessage: protoMsg}) + if err != nil { + err = fmt.Errorf("failed to associate streamID %s to proto package %s and message %s: %w", streamUUID.String(), protoPackageName, protoMsg, err) + errs = append(errs, err) + continue + } + + l.WithFields(logrus.Fields{ + "protoMsg": protoMsg, + "protoPackageName": protoPackageName, + "streamId": streamUUID.String(), + }).Info("successfully associated stream ID with proto package") + } + } + + return errors.Join(errs...) 
+} + +// SendControl dispatches a control message to the decoder +func (c *Client) SendControl(streamUUID uuid.UUID, jdata string) error { + if err := c.doPost("/control", &SendControlRequest{StreamUUID: streamUUID, Payload: jdata}); err != nil { + return fmt.Errorf("failed to send control message %s: %w", streamUUID.String(), err) + } + + c.logger.WithFields(logrus.Fields{ + "streamId": streamUUID.String(), + }).Info("successfully sent control message") + + return nil +} + +// Unload removes the stream association from the decoder +func (c *Client) Unload(streamUUIDs []uuid.UUID) error { + errs := make([]error, 0, len(streamUUIDs)) + for _, streamUUID := range streamUUIDs { + streamIDStr := base64.StdEncoding.EncodeToString(streamUUID[:]) + if err := c.doDelete(fmt.Sprintf("/stream?stream_uuid=%s", url.PathEscape(streamIDStr))); err != nil { + err = fmt.Errorf("failed to delete stream ID association %s: %w", streamUUID.String(), err) + errs = append(errs, err) + continue + } + + c.logger.WithFields(logrus.Fields{ + "streamId": streamUUID.String(), + }).Info("successfully deleted stream ID association") + } + + return errors.Join(errs...) +} diff --git a/pkg/schema/model.go b/pkg/schema/model.go new file mode 100644 index 0000000..f0bd318 --- /dev/null +++ b/pkg/schema/model.go @@ -0,0 +1,113 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+ +package schema + +import ( + "encoding/base64" + "encoding/json" + + "github.com/google/uuid" +) + +// UpsertSchemaRequest is the request body for the /schema endpoint +type UpsertSchemaRequest struct { + ProtoDescriptor []byte +} + +// MarshalJSON marshals the UpsertSchemaRequest to JSON +func (u UpsertSchemaRequest) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + ProtoDescriptor string + }{ + ProtoDescriptor: base64.StdEncoding.EncodeToString(u.ProtoDescriptor), + }) +} + +// UnmarshalJSON unmarshals the UpsertSchemaRequest from JSON +func (u *UpsertSchemaRequest) UnmarshalJSON(data []byte) error { + var intermediate struct{ ProtoDescriptor string } + if err := json.Unmarshal(data, &intermediate); err != nil { + return err + } + protoDesc, err := base64.StdEncoding.DecodeString(intermediate.ProtoDescriptor) + if err != nil { + return err + } + u.ProtoDescriptor = protoDesc + return nil +} + +// AddSchemaAssociationRequest is the request body for the /stream endpoint +type AddSchemaAssociationRequest struct { + StreamUUID uuid.UUID + ProtoPackage string + ProtoMessage string +} + +// MarshalJSON marshals the AddSchemaAssociationRequest to JSON +func (a AddSchemaAssociationRequest) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + StreamUUID string + ProtoPackage string + ProtoMessage string + }{ + StreamUUID: a.StreamUUID.String(), + ProtoPackage: a.ProtoPackage, + ProtoMessage: a.ProtoMessage, + }) +} + +// UnmarshalJSON unmarshals the AddSchemaAssociationRequest from JSON +func (a *AddSchemaAssociationRequest) UnmarshalJSON(data []byte) error { + var intermediate struct { + StreamUUID string + ProtoPackage string + ProtoMessage string + } + if err := json.Unmarshal(data, &intermediate); err != nil { + return err + } + streamUUID, err := uuid.Parse(intermediate.StreamUUID) + if err != nil { + return err + } + a.StreamUUID = streamUUID + a.ProtoPackage = intermediate.ProtoPackage + a.ProtoMessage = intermediate.ProtoMessage + 
return nil +} + +// SendControlRequest is the request body for the /control endpoint +type SendControlRequest struct { + StreamUUID uuid.UUID + Payload string +} + +// MarshalJSON marshals the SendControlRequest to JSON +func (s SendControlRequest) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + StreamUUID string + Payload string + }{ + StreamUUID: s.StreamUUID.String(), + Payload: s.Payload, + }) +} + +// UnmarshalJSON unmarshals the SendControlRequest from JSON +func (s *SendControlRequest) UnmarshalJSON(data []byte) error { + var intermediate struct { + StreamUUID string + Payload string + } + if err := json.Unmarshal(data, &intermediate); err != nil { + return err + } + streamUUID, err := uuid.Parse(intermediate.StreamUUID) + if err != nil { + return err + } + s.StreamUUID = streamUUID + s.Payload = intermediate.Payload + return nil +} diff --git a/pkg/schema/options.go b/pkg/schema/options.go new file mode 100644 index 0000000..bbef271 --- /dev/null +++ b/pkg/schema/options.go @@ -0,0 +1,41 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+ +package schema + +import ( + "fmt" + "net/url" + + "github.com/spf13/pflag" +) + +const ( + // DefaultControlPort is the default used for the local decoder server + DefaultControlPort = uint16(20789) + + controlPrefix = "decoder-api" + controlScheme = "http" + defaultControlIP = "" +) + +// Options for internal communication with the decoder +type Options struct { + ip string + port uint16 +} + +// AddOptionsToFlags adds the options to the provided flag set +func AddOptionsToFlags(flags *pflag.FlagSet, opts *Options) { + flags.StringVar(&opts.ip, controlPrefix+"-ip", defaultControlIP, "IP address of the decoder HTTP server") + flags.Uint16Var(&opts.port, controlPrefix+"-port", DefaultControlPort, "port address of the decoder HTTP server") +} + +// Parse the options +func (o *Options) Parse() error { + _, err := url.ParseRequestURI(fmt.Sprintf("%s://%s:%d", controlScheme, o.ip, o.port)) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/schema/serve.go b/pkg/schema/serve.go new file mode 100644 index 0000000..4747aba --- /dev/null +++ b/pkg/schema/serve.go @@ -0,0 +1,121 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+
+package schema
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"os/signal"
+	"strings"
+	"syscall"
+
+	"github.com/google/uuid"
+)
+
+// readBodyAs decodes the request body as JSON into a value of type T.
+func readBodyAs[T any](req *http.Request) (out T, err error) {
+	buf := new(strings.Builder)
+	_, err = io.Copy(buf, req.Body)
+	if err != nil {
+		return
+	}
+	err = json.Unmarshal([]byte(buf.String()), &out)
+	return
+}
+
+// serveHTTP runs the decoder control-plane HTTP server until ctx is
+// cancelled or an interrupt/SIGTERM is received.
+func (s *Server) serveHTTP(ctx context.Context) error {
+	// Use a dedicated mux instead of registering on the global
+	// http.DefaultServeMux: repeated calls would panic on duplicate
+	// registration and unrelated packages could inject handlers.
+	mux := http.NewServeMux()
+
+	mux.HandleFunc("/schema", func(w http.ResponseWriter, r *http.Request) {
+		switch r.Method {
+		case http.MethodPost:
+			body, err := readBodyAs[UpsertSchemaRequest](r)
+			if err != nil {
+				// A malformed client payload is a 400, not a server fault.
+				w.WriteHeader(http.StatusBadRequest)
+				return
+			}
+			if err := s.UpsertProtoPackage(r.Context(), &body); err != nil {
+				w.WriteHeader(http.StatusInternalServerError)
+				return
+			}
+			w.WriteHeader(http.StatusOK)
+
+		default:
+			w.WriteHeader(http.StatusMethodNotAllowed)
+		}
+	})
+
+	mux.HandleFunc("/stream", func(w http.ResponseWriter, r *http.Request) {
+		switch r.Method {
+		case http.MethodPost:
+			body, err := readBodyAs[AddSchemaAssociationRequest](r)
+			if err != nil {
+				w.WriteHeader(http.StatusBadRequest)
+				return
+			}
+			if err := s.AddStreamToSchemaAssociation(r.Context(), &body); err != nil {
+				w.WriteHeader(http.StatusInternalServerError)
+				return
+			}
+			w.WriteHeader(http.StatusOK)
+
+		case http.MethodDelete:
+			// stream_uuid arrives as base64-encoded raw UUID bytes.
+			streamUUIDStr := r.URL.Query().Get("stream_uuid")
+			unescapedStreamUUIDStr, err := url.PathUnescape(streamUUIDStr)
+			if err != nil {
+				s.logger.WithError(err).Error("failed to unescape stream_uuid")
+				w.WriteHeader(http.StatusBadRequest)
+				return
+			}
+
+			bs, err := base64.StdEncoding.DecodeString(unescapedStreamUUIDStr)
+			if err != nil {
+				s.logger.WithError(err).Errorf("failed to decode stream_uuid from %s", unescapedStreamUUIDStr)
+				w.WriteHeader(http.StatusBadRequest)
+				return
+			}
+
+			streamUUID, err := uuid.FromBytes(bs)
+			if err != nil {
+				s.logger.WithError(err).Error("failed to parse stream id from bytes")
+				w.WriteHeader(http.StatusBadRequest)
+				return
+			}
+
+			s.DeleteStreamToSchemaAssociation(r.Context(), streamUUID)
+			w.WriteHeader(http.StatusAccepted)
+
+		default:
+			w.WriteHeader(http.StatusMethodNotAllowed)
+		}
+	})
+
+	srv := &http.Server{
+		Addr:    fmt.Sprintf("%s:%d", s.opts.ip, s.opts.port),
+		Handler: mux,
+	}
+
+	// Shut the server down on signal or context cancellation.
+	go func() {
+		stopper := make(chan os.Signal, 1)
+		signal.Notify(stopper, os.Interrupt, syscall.SIGTERM, syscall.SIGINT)
+
+		select {
+		case <-stopper:
+		case <-ctx.Done():
+		}
+		if err := srv.Close(); err != nil {
+			s.logger.WithError(err).Error("failed stopping the server")
+		}
+	}()
+
+	if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
+		return err
+	}
+	return nil
+}
diff --git a/pkg/schema/server.go b/pkg/schema/server.go
new file mode 100644
index 0000000..25466d8
--- /dev/null
+++ b/pkg/schema/server.go
@@ -0,0 +1,131 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+
+package schema
+
+import (
+	"context"
+	"crypto/sha1"
+	"encoding/base64"
+	"fmt"
+	"path/filepath"
+	"strings"
+
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/descriptorpb"
+
+	"github.com/google/uuid"
+	"github.com/sirupsen/logrus"
+)
+
+// Server is a server that implements the DynamicDecoderServer interface.
+// NOTE(review): the store maps are read and written from concurrent HTTP
+// handlers without synchronization — confirm single-threaded use or add a
+// mutex to Store.
+type Server struct {
+	ctx    context.Context
+	logger *logrus.Logger
+	opts   *Options
+	store  *Store
+}
+
+// NewServer returns a new Server
+func NewServer(ctx context.Context, logger *logrus.Logger, opts *Options, store *Store) *Server {
+	return &Server{
+		ctx:    ctx,
+		logger: logger,
+		opts:   opts,
+		store:  store,
+	}
+}
+
+// Serve starts the server
+func (s *Server) Serve() error {
+	return s.serveHTTP(s.ctx)
+}
+
+// UpsertProtoPackage registers a proto package with the server. The
+// descriptor must contain exactly one file; re-registering an identical
+// descriptor (same SHA-1) is a no-op.
+func (s *Server) UpsertProtoPackage(_ context.Context, req *UpsertSchemaRequest) error {
+	checksum := sha1.Sum(req.ProtoDescriptor)
+	checksumAsString := base64.StdEncoding.EncodeToString(checksum[:])
+
+	fds := &descriptorpb.FileDescriptorSet{}
+	if err := proto.Unmarshal(req.ProtoDescriptor, fds); err != nil {
+		s.logger.WithError(err).Error("unable to unmarshal proto descriptor")
+		return err
+	}
+
+	// Validate the set size BEFORE indexing fds.File[0]; the original
+	// order panicked with index-out-of-range on an empty descriptor set.
+	if len(fds.File) != 1 {
+		err := fmt.Errorf("expected exactly one file descriptor in the set, got %d", len(fds.File))
+		s.logger.WithField("checksum", checksumAsString).WithError(err).Error("unable to interpret proto descriptor")
+		return err
+	}
+
+	protoPackageFile := fds.File[0].GetName()
+	protoPackageName := strings.TrimSuffix(protoPackageFile, filepath.Ext(protoPackageFile))
+	l := s.logger.WithFields(logrus.Fields{
+		"protoPackageName": protoPackageName,
+		"checksum":         checksumAsString,
+	})
+
+	if current, ok := s.store.schemas[protoPackageName]; ok {
+		if current.checksum == checksum {
+			l.Info("checksum matches, skipping")
+			return nil
+		}
+		l.Warn("overwriting existing proto package")
+	} else {
+		l.Info("setting proto package")
+	}
+
+	s.store.schemas[protoPackageName] = &RecordedProtoDescriptor{
+		checksum:        checksum,
+		ProtoDescriptor: req.ProtoDescriptor,
+	}
+
+	return nil
+}
+
+// AddStreamToSchemaAssociation associates a stream with a schema.
+// Re-adding an identical association is a no-op; a conflicting one fails.
+func (s *Server) AddStreamToSchemaAssociation(_ context.Context, req *AddSchemaAssociationRequest) error {
+	l := s.logger.WithFields(logrus.Fields{
+		"protoMsg":     req.ProtoMessage,
+		"protoPackage": req.ProtoPackage,
+		"streamUUID":   req.StreamUUID.String(),
+	})
+
+	if current, ok := s.store.streamToSchema[req.StreamUUID]; ok {
+		if current.ProtoMsg == req.ProtoMessage && current.ProtoPackage == req.ProtoPackage {
+			return nil
+		}
+		err := fmt.Errorf("stream already has a schema association")
+		l.WithError(err).Error("error adding stream to schema association")
+		return err
+	}
+
+	if _, ok := s.store.schemas[req.ProtoPackage]; !ok {
+		err := fmt.Errorf("proto package %s not found", req.ProtoPackage)
+		l.WithError(err).Error("error adding stream to schema association")
+		return err
+	}
+
+	s.store.streamToSchema[req.StreamUUID] = &RecordedStreamToSchema{
+		ProtoMsg:     req.ProtoMessage,
+		ProtoPackage: req.ProtoPackage,
+	}
+
+	l.Info("association added")
+
+	return nil
+}
+
+// DeleteStreamToSchemaAssociation removes the association between a stream
+// and a schema; deleting an unknown stream is logged at debug and ignored.
+func (s *Server) DeleteStreamToSchemaAssociation(_ context.Context, req uuid.UUID) {
+	l := s.logger.WithField("streamUUID", req.String())
+
+	if current, ok := s.store.streamToSchema[req]; !ok {
+		l.Debug("no association found for stream UUID")
+	} else {
+		delete(s.store.streamToSchema, req)
+		l.WithFields(logrus.Fields{
+			"protoMsg":     current.ProtoMsg,
+			"protoPackage": current.ProtoPackage,
+		}).Info("association removed")
+	}
+}
diff --git a/pkg/schema/store.go b/pkg/schema/store.go
new file mode 100644
index 0000000..7bb7542
--- /dev/null
+++ b/pkg/schema/store.go
@@ -0,0 +1,75 @@
+package schema
+
+import (
+	"fmt"
+
+	"github.com/google/uuid"
+	"google.golang.org/protobuf/proto"
+
"google.golang.org/protobuf/reflect/protodesc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/dynamicpb" +) + +// RecordedProtoDescriptor is a recorded proto descriptor +type RecordedProtoDescriptor struct { + checksum [20]byte + ProtoDescriptor []byte +} + +// RecordedStreamToSchema is a mapping of a stream to a schema +type RecordedStreamToSchema struct { + ProtoMsg string + ProtoPackage string +} + +// Store is an in memory store for protobuf schemas +type Store struct { + schemas map[string]*RecordedProtoDescriptor + streamToSchema map[uuid.UUID]*RecordedStreamToSchema +} + +// NewStore returns a new Store +func NewStore() *Store { + return &Store{ + schemas: make(map[string]*RecordedProtoDescriptor), + streamToSchema: make(map[uuid.UUID]*RecordedStreamToSchema), + } +} + +// GetProtoMsgInstance returns a new dynamic protobuf message instance +func (s *Store) GetProtoMsgInstance(streamUUID uuid.UUID) (*dynamicpb.Message, error) { + schema, ok := s.streamToSchema[streamUUID] + if !ok { + return nil, fmt.Errorf("no schema found for stream UUID %s", streamUUID.String()) + } + + sch, ok := s.schemas[schema.ProtoPackage] + if !ok { + return nil, fmt.Errorf("no schema found for proto package %s", schema.ProtoPackage) + } + + fds := &descriptorpb.FileDescriptorSet{} + if err := proto.Unmarshal(sch.ProtoDescriptor, fds); err != nil { + return nil, err + } + + pd, err := protodesc.NewFiles(fds) + if err != nil { + return nil, err + } + + msgName := protoreflect.FullName(schema.ProtoMsg) + var desc protoreflect.Descriptor + desc, err = pd.FindDescriptorByName(msgName) + if err != nil { + return nil, err + } + + md, ok := desc.(protoreflect.MessageDescriptor) + if !ok { + return nil, fmt.Errorf("failed to cast desc to protoreflect.MessageDescriptor, got %T", desc) + } + + return dynamicpb.NewMessage(md), nil +} diff --git a/setup_jbpfp_env.sh b/setup_jbpfp_env.sh new 
file mode 100755 index 0000000..540e785 --- /dev/null +++ b/setup_jbpfp_env.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +export JBPFP_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)" +export NANO_PB=$JBPFP_PATH/3p/nanopb +source $JBPFP_PATH/jbpf/setup_jbpf_env.sh diff --git a/testdata/example1/example.options b/testdata/example1/example.options new file mode 100644 index 0000000..c393729 --- /dev/null +++ b/testdata/example1/example.options @@ -0,0 +1,3 @@ +request.name max_size:32 +response.msg max_size:100 +status.status max_size:100 diff --git a/testdata/example1/example.proto b/testdata/example1/example.proto new file mode 100644 index 0000000..47597e7 --- /dev/null +++ b/testdata/example1/example.proto @@ -0,0 +1,35 @@ +syntax = "proto2"; + +enum my_state { + GOOD=0 ; + BAD=1 ; +} + +message my_struct { + required uint32 a_num = 1; + optional uint32 another_num = 2; +} + +message request { + required uint32 id = 1; + required string name = 2; + optional my_state state = 3; +} + +message response { + required uint32 id = 1; + required string msg = 2; +} + +message req_resp { + oneof req_or_resp { + request req = 1; + response resp = 2; + } +} + +message status { + required uint32 id = 1 ; + required string status = 2; + required my_struct a_struct = 3; +} diff --git a/testdata/example2/example2.options b/testdata/example2/example2.options new file mode 100644 index 0000000..8888f25 --- /dev/null +++ b/testdata/example2/example2.options @@ -0,0 +1 @@ +item.name max_size:30 diff --git a/testdata/example2/example2.proto b/testdata/example2/example2.proto new file mode 100644 index 0000000..0169988 --- /dev/null +++ b/testdata/example2/example2.proto @@ -0,0 +1,6 @@ +syntax = "proto2"; + +message item { + required string name = 1; + optional uint32 val = 2; +} diff --git a/testdata/example3/example3.options b/testdata/example3/example3.options new file mode 100644 index 0000000..bcb7cd4 --- /dev/null +++ b/testdata/example3/example3.options @@ -0,0 +1,14 @@ 
+obj.bytesval max_size:20 +obj.sval max_size:20 +obj.barr max_count:10 +obj.darr max_count:10 +obj.f32arr max_count:10 +obj.f64arr max_count:10 +obj.i32arr max_count:10 +obj.i64arr max_count:10 +obj.sf32arr max_count:10 +obj.sf64arr max_count:10 +obj.si32arr max_count:10 +obj.si64arr max_count:10 +obj.ui32arr max_count:10 +obj.ui64arr max_count:10 diff --git a/testdata/example3/example3.proto b/testdata/example3/example3.proto new file mode 100644 index 0000000..b0c7e5a --- /dev/null +++ b/testdata/example3/example3.proto @@ -0,0 +1,31 @@ +syntax = "proto2"; + +message obj { + required bool bval = 1; + required bytes bytesval = 2; + required double dval = 3; + required fixed32 f32val = 4; + required fixed64 f64val = 5; + required int32 i32val = 6; + required int64 i64val = 7; + required sfixed32 sf32val = 8; + required sfixed64 sf64val = 9; + required sint32 si32val = 10; + required sint64 si64val = 11; + required string sval = 12; + required uint32 ui32val = 13; + required uint64 ui64val = 14; + + repeated bool barr = 15; + repeated double darr = 16; + repeated fixed32 f32arr = 17; + repeated fixed64 f64arr = 18; + repeated int32 i32arr = 19; + repeated int64 i64arr = 20; + repeated sfixed32 sf32arr = 21; + repeated sfixed64 sf64arr = 22; + repeated sint32 si32arr = 23; + repeated sint64 si64arr = 24; + repeated uint32 ui32arr = 25; + repeated uint64 ui64arr = 26; +}