net/vpp: move to VPP 19.04

This patch updates the net/vpp implementation from VPP 19.01 to
VPP 19.04.

1. Some binary APIs were deprecated in 19.04, and control events are
   now handled through the application's message queue instead. Each
   removed handler is replaced by the corresponding control event
   (a sketch of the new flow follows this list):

   - vl_api_bind_sock_reply_t_handler -> SESSION_CTRL_EVT_BOUND,
   - vl_api_unbind_sock_reply_t_handler -> SESSION_CTRL_EVT_UNLISTEN_REPLY,
   - vl_api_accept_session_t_handler -> SESSION_CTRL_EVT_ACCEPTED,
   - vl_api_connect_session_reply_t_handler -> SESSION_CTRL_EVT_CONNECTED,
   - vl_api_disconnect_session_t_handler -> SESSION_CTRL_EVT_DISCONNECTED,
   - vl_api_reset_session_t_handler -> SESSION_CTRL_EVT_RESET
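
   A minimal sketch of the new flow, condensed from the socket-module
   diff further below (all names and types are from VPP 19.04's
   session-layer API): the poller drains the app's shared-memory
   message queue and dispatches each control event by type, instead of
   relying on registered binary-API handlers.

   static int
   app_queue_poller(void *ctx)
   {
   	session_event_t *e;
   	svm_msg_q_msg_t msg;

   	if (!svm_msg_q_is_empty(g_svm.app_event_queue)) {
   		svm_msg_q_sub(g_svm.app_event_queue, &msg, SVM_Q_WAIT, 0);
   		/* The message payload is a session_event_t; dispatch on its
   		 * event_type (BOUND, ACCEPTED, CONNECTED, DISCONNECTED, ...). */
   		e = svm_msg_q_msg_data(g_svm.app_event_queue, &msg);
   		handle_mq_event(e);
   		svm_msg_q_free_msg(g_svm.app_event_queue, &msg);
   	}
   	return 0;
   }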

2. Fixes for Fedora 29/30:

   - added "-Wno-address-of-packed-member" (DPDK 19.02 fails to compile
     with gcc 9.1),
   - forced the "-maes" compile flag so that gcc 9.1 can build the
     crypto_ia32 and crypto_ipsecmb plugins (gcc 9.1 does not enable it
     for -march=silvermont; see the illustration after this list),
   - some minor fixes.
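
   To illustrate the -maes problem (a hypothetical reproducer, not part
   of this patch): with gcc 9.1 and -march=silvermont alone, AES-NI
   intrinsics such as the one below are rejected until -maes is added.

   /* Fails with:  gcc -march=silvermont -c aes_repro.c
    * Builds with: gcc -march=silvermont -maes -c aes_repro.c */
   #include <wmmintrin.h>

   __m128i
   do_aes_round(__m128i state, __m128i round_key)
   {
   	return _mm_aesenc_si128(state, round_key);
   }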

3. The default VPP installation path used by the test scripts is
   changed from /usr/local/src/vpp to /usr/local/src/vpp-19.04 to avoid
   a VPP version conflict.

Change-Id: I1d20ad7f138f5086ba7fab41d77d86f8139d038e
Signed-off-by: Tomasz Kulasek <tomaszx.kulasek@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/459113
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Reviewed-by: Shuhei Matsumoto <shuhei.matsumoto.xt@hitachi.com>
Authored by Tomasz Kulasek on 2019-06-19 16:14:50 +02:00; committed by Jim Harris
parent bee1130c03
commit 9307ff5a54
8 changed files with 112 additions and 157 deletions

File 1 of 8:

@@ -7,7 +7,7 @@ packet processing graph (see [What is VPP?](https://wiki.fd.io/view/VPP/What_is_
 Detailed instructions for **simplified steps 1-3** below, can be found on
 VPP [Quick Start Guide](https://wiki.fd.io/view/VPP).
-*SPDK supports VPP version 19.01.1.*
+*SPDK supports VPP version 19.04.2.*
 # 1. Building VPP (optional) {#vpp_build}
@@ -16,21 +16,7 @@ VPP [Quick Start Guide](https://wiki.fd.io/view/VPP).
 Clone and checkout VPP
 ~~~
 git clone https://gerrit.fd.io/r/vpp && cd vpp
-git checkout v19.01.1
-git cherry-pick 97dcf5bd26ca6de580943f5d39681f0144782c3d
-git cherry-pick f5dc9fbf814865b31b52b20f5bf959e9ff818b25
-~~~
-NOTE: Cherry-picks are required for better integration with SPDK. They are
-already merged to VPP 19.04.
-NOTE: We have noticed that VPP tries to close connections to the non existing,
-already closed applications, after timeout. It causes intermittent VPP application
-segfaults when few instances of VPP clients connects and disconnects several times.
-The following workaround for this issue helps to create more stable environment
-for VPP v19.01.1. This issue should be solved in the next release of VPP.
-~~~
-git apply test/common/config/patch/vpp/workaround-dont-notify-transport-closing.patch
+git checkout stable/1904
 ~~~
 Install VPP build dependencies
@@ -93,8 +79,6 @@ DPDK section (`dpdk`):
 - `num-rx-queues <num>` -- number of receive queues.
 - `num-tx-queues <num>` -- number of transmit queues.
 - `dev <PCI address>` -- whitelisted device.
-- `num-mbufs` -- numbers of allocated buffers. For the most of our scenarios this
-  parameter requires to be increased over default value.
 Session section (`session`):
 - `evt_qs_memfd_seg` -- uses a memfd segment for event queues. This is required for SPDK.
@@ -115,9 +99,6 @@ unix {
 cpu {
 	main-core 1
 }
-dpdk {
-	num-mbufs 128000
-}
 session {
 	evt_qs_memfd_seg
 }

File 2 of 8:

@@ -284,13 +284,11 @@ enum spdk_vpp_create_type {
  * VPP message handlers
  */
 static void
-vl_api_accept_session_t_handler(vl_api_accept_session_t *mp)
+session_accepted_handler(session_accepted_msg_t *mp)
 {
 	svm_fifo_t *rx_fifo, *tx_fifo;
 	struct spdk_vpp_session *client_session, *listen_session;
-	SPDK_DEBUGLOG(SPDK_SOCK_VPP, "listeners handle is %" PRIu64 "\n", mp->listener_handle);
 	pthread_mutex_lock(&g_svm.session_get_lock);
 	listen_session = _spdk_vpp_session_get_by_handle(mp->listener_handle, true);
 	pthread_mutex_unlock(&g_svm.session_get_lock);
@@ -299,6 +297,8 @@ vl_api_accept_session_t_handler(vl_api_accept_session_t *mp)
 		return;
 	}
+	SPDK_DEBUGLOG(SPDK_SOCK_VPP, "Listeners handle is %" PRIu64 "\n", mp->listener_handle);
 	/* Allocate local session for a client and set it up */
 	client_session = _spdk_vpp_session_create();
 	if (client_session == NULL) {
@@ -343,7 +343,7 @@ vl_api_accept_session_t_handler(vl_api_accept_session_t *mp)
 }
 static void
-vl_api_connect_session_reply_t_handler(vl_api_connect_session_reply_t *mp)
+session_connected_handler(session_connected_msg_t *mp)
 {
 	struct spdk_vpp_session *session;
 	svm_fifo_t *rx_fifo, *tx_fifo;
@@ -380,14 +380,15 @@ vl_api_connect_session_reply_t_handler(vl_api_connect_session_reply_t *mp)
 }
 static void
-vl_api_disconnect_session_t_handler(vl_api_disconnect_session_t *mp)
+session_disconnected_handler(session_disconnected_msg_t *mp)
 {
 	struct spdk_vpp_session *session = 0;
 	pthread_mutex_lock(&g_svm.session_get_lock);
 	session = _spdk_vpp_session_get_by_handle(mp->handle, false);
 	if (session == NULL) {
-		SPDK_ERRLOG("Invalid session handler (%" PRIu64 ").\n", mp->handle);
+		SPDK_ERRLOG("Session with handle=%" PRIu64 " not found.\n",
+			    mp->handle);
 		pthread_mutex_unlock(&g_svm.session_get_lock);
 		return;
 	}
@@ -399,16 +400,18 @@ vl_api_disconnect_session_t_handler(vl_api_disconnect_session_t *mp)
 }
 static void
-vl_api_reset_session_t_handler(vl_api_reset_session_t *mp)
+session_reset_handler(session_reset_msg_t *mp)
 {
-	vl_api_reset_session_reply_t *rmp;
 	int rv = 0;
-	struct spdk_vpp_session *session = 0;
+	struct spdk_vpp_session *session = NULL;
+	app_session_evt_t app_evt;
+	session_reset_reply_msg_t *rmp;
 	pthread_mutex_lock(&g_svm.session_get_lock);
 	session = _spdk_vpp_session_get_by_handle(mp->handle, false);
 	if (session == NULL) {
-		SPDK_ERRLOG("Invalid session handler (%" PRIu64 ").\n", mp->handle);
+		SPDK_ERRLOG("Session with handle=%" PRIu64 " not found.\n",
+			    mp->handle);
 		pthread_mutex_unlock(&g_svm.session_get_lock);
 		return;
 	}
@@ -417,19 +420,16 @@ vl_api_reset_session_t_handler(vl_api_reset_session_t *mp)
 	session->app_session.session_state = VPP_SESSION_STATE_DISCONNECT;
 	pthread_mutex_unlock(&g_svm.session_get_lock);
-	rmp = vl_msg_api_alloc(sizeof(*rmp));
-	if (rmp == NULL) {
-		return;
-	}
-	memset(rmp, 0, sizeof(*rmp));
-	rmp->_vl_msg_id = ntohs(VL_API_RESET_SESSION_REPLY);
+	app_alloc_ctrl_evt_to_vpp(session->app_session.vpp_evt_q, &app_evt,
+				  SESSION_CTRL_EVT_RESET_REPLY);
+	rmp = (session_reset_reply_msg_t *) app_evt.evt->data;
 	rmp->retval = rv;
 	rmp->handle = mp->handle;
-	vl_msg_api_send_shmem(g_svm.vl_input_queue, (u8 *)&rmp);
+	app_send_ctrl_evt_to_vpp(session->app_session.vpp_evt_q, &app_evt);
 }
 static void
-vl_api_bind_sock_reply_t_handler(vl_api_bind_sock_reply_t *mp)
+session_bound_handler(session_bound_msg_t *mp)
 {
 	struct spdk_vpp_session *session;
@@ -459,7 +459,7 @@ vl_api_bind_sock_reply_t_handler(vl_api_bind_sock_reply_t *mp)
 }
 static void
-vl_api_unbind_sock_reply_t_handler(vl_api_unbind_sock_reply_t *mp)
+session_unlisten_reply_handler(session_unlisten_reply_msg_t *mp)
 {
 	struct spdk_vpp_session *session;
@@ -478,6 +478,33 @@ vl_api_unbind_sock_reply_t_handler(vl_api_unbind_sock_reply_t *mp)
 	session->app_session.session_state = VPP_SESSION_STATE_CLOSE;
 }
+static void
+handle_mq_event(session_event_t *e)
+{
+	switch (e->event_type) {
+	case SESSION_CTRL_EVT_BOUND:
+		session_bound_handler((session_bound_msg_t *) e->data);
+		break;
+	case SESSION_CTRL_EVT_ACCEPTED:
+		session_accepted_handler((session_accepted_msg_t *) e->data);
+		break;
+	case SESSION_CTRL_EVT_CONNECTED:
+		session_connected_handler((session_connected_msg_t *) e->data);
+		break;
+	case SESSION_CTRL_EVT_DISCONNECTED:
+		session_disconnected_handler((session_disconnected_msg_t *) e->data);
+		break;
+	case SESSION_CTRL_EVT_RESET:
+		session_reset_handler((session_reset_msg_t *) e->data);
+		break;
+	case SESSION_CTRL_EVT_UNLISTEN_REPLY:
+		session_unlisten_reply_handler((session_unlisten_reply_msg_t *) e->data);
+		break;
+	default:
+		SPDK_DEBUGLOG(SPDK_SOCK_VPP, "Unhandled event %u\n", e->event_type);
+	}
+}
 static int
 vpp_queue_poller(void *ctx)
 {
@@ -494,12 +521,15 @@ vpp_queue_poller(void *ctx)
 static int
 app_queue_poller(void *ctx)
 {
+	session_event_t *e;
 	svm_msg_q_msg_t msg;
 	if (!svm_msg_q_is_empty(g_svm.app_event_queue)) {
 		svm_msg_q_sub(g_svm.app_event_queue, &msg, SVM_Q_WAIT, 0);
+		e = svm_msg_q_msg_data(g_svm.app_event_queue, &msg);
+		handle_mq_event(e);
 		svm_msg_q_free_msg(g_svm.app_event_queue, &msg);
 	}
 	return 0;
 }
@@ -580,22 +610,21 @@ _spdk_vpp_session_disconnect(struct spdk_vpp_session *session)
 {
 	int rv = 0;
 	vl_api_disconnect_session_t *dmp;
-	vl_api_disconnect_session_reply_t *rmp;
+	session_disconnected_reply_msg_t *rmp;
+	app_session_evt_t app_evt;
 	if (session->app_session.session_state == VPP_SESSION_STATE_DISCONNECT) {
 		SPDK_DEBUGLOG(SPDK_SOCK_VPP, "Session is already in disconnecting state %p (%d)\n",
 			      session, session->id);
-		rmp = vl_msg_api_alloc(sizeof(*rmp));
-		if (rmp == NULL) {
-			return -ENOMEM;
-		}
-		memset(rmp, 0, sizeof(*rmp));
-		rmp->_vl_msg_id = ntohs(VL_API_DISCONNECT_SESSION_REPLY);
+		app_alloc_ctrl_evt_to_vpp(session->app_session.vpp_evt_q, &app_evt,
+					  SESSION_CTRL_EVT_DISCONNECTED_REPLY);
+		rmp = (session_disconnected_reply_msg_t *) app_evt.evt->data;
 		rmp->retval = rv;
 		rmp->handle = session->handle;
-		vl_msg_api_send_shmem(g_svm.vl_input_queue, (u8 *)&rmp);
+		rmp->context = session->context;
+		app_send_ctrl_evt_to_vpp(session->app_session.vpp_evt_q, &app_evt);
 		return 0;
 	}
 	SPDK_DEBUGLOG(SPDK_SOCK_VPP, "Disconnect session %p (%d)\n", session, session->id);
@@ -747,8 +776,8 @@ spdk_vpp_sock_accept(struct spdk_sock *_sock)
 	struct spdk_vpp_session *client_session = NULL;
 	u32 client_session_index = ~0;
 	uword elts = 0;
-	int rv = 0;
-	vl_api_accept_session_reply_t *rmp;
+	app_session_evt_t app_evt;
+	session_accepted_reply_msg_t *rmp;
 	assert(listen_session != NULL);
 	assert(g_svm.vpp_initialized);
@@ -790,16 +819,12 @@ spdk_vpp_sock_accept(struct spdk_sock *_sock)
 	/*
 	 * Send accept session reply
 	 */
-	rmp = vl_msg_api_alloc(sizeof(*rmp));
-	if (rmp == NULL) {
-		return NULL;
-	}
-	memset(rmp, 0, sizeof(*rmp));
-	rmp->_vl_msg_id = ntohs(VL_API_ACCEPT_SESSION_REPLY);
-	rmp->retval = htonl(rv);
-	rmp->context = client_session->context;
+	app_alloc_ctrl_evt_to_vpp(client_session->app_session.vpp_evt_q, &app_evt,
+				  SESSION_CTRL_EVT_ACCEPTED_REPLY);
+	rmp = (session_accepted_reply_msg_t *) app_evt.evt->data;
 	rmp->handle = client_session->handle;
-	vl_msg_api_send_shmem(g_svm.vl_input_queue, (u8 *)&rmp);
+	rmp->context = client_session->context;
+	app_send_ctrl_evt_to_vpp(client_session->app_session.vpp_evt_q, &app_evt);
 	return &client_session->base;
 }
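
(The reset, disconnect, and accept replies above all follow the same
allocate/fill/send sequence; a condensed sketch against VPP 19.04's
app_session helpers, using the reset reply as the example:)

	app_session_evt_t app_evt;
	session_reset_reply_msg_t *rmp;

	/* Allocate the control event directly in the session's VPP event queue. */
	app_alloc_ctrl_evt_to_vpp(session->app_session.vpp_evt_q, &app_evt,
				  SESSION_CTRL_EVT_RESET_REPLY);
	/* Fill in the typed reply payload carried by the event. */
	rmp = (session_reset_reply_msg_t *) app_evt.evt->data;
	rmp->retval = 0;
	rmp->handle = session->handle;
	/* Hand the reply back to VPP over the same queue. */
	app_send_ctrl_evt_to_vpp(session->app_session.vpp_evt_q, &app_evt);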
@@ -843,6 +868,8 @@ spdk_vpp_sock_recv(struct spdk_sock *_sock, void *buf, size_t len)
 	if (bytes == 0) {
 		if (session->app_session.session_state == VPP_SESSION_STATE_DISCONNECT) {
 			/* Socket is disconnected */
+			SPDK_DEBUGLOG(SPDK_SOCK_VPP, "Client %p(%" PRIu32 ") is disconnected.\n",
+				      session, session->id);
 			errno = 0;
 			return 0;
 		}
@@ -850,7 +877,7 @@ spdk_vpp_sock_recv(struct spdk_sock *_sock, void *buf, size_t len)
 		return -1;
 	}
-	rc = svm_fifo_dequeue_nowait(rx_fifo, bytes, buf);
+	rc = app_recv_stream_raw(rx_fifo, buf, bytes, 0, 0);
 	if (rc < 0) {
 		errno = -rc;
 		return rc;
@@ -901,7 +928,7 @@ spdk_vpp_sock_writev(struct spdk_sock *_sock, struct iovec *iov, int iovcnt)
 	assert(g_svm.vpp_initialized);
 	tx_fifo = session->app_session.tx_fifo;
-	et = FIFO_EVENT_APP_TX;
+	et = SESSION_IO_EVT_TX;
 	for (i = 0; i < iovcnt; ++i) {
 		if (svm_fifo_is_full(tx_fifo)) {
@@ -911,7 +938,9 @@ spdk_vpp_sock_writev(struct spdk_sock *_sock, struct iovec *iov, int iovcnt)
 		/* We use only stream connection for now */
 		rc = app_send_stream_raw(tx_fifo, session->app_session.vpp_evt_q,
-					 iov[i].iov_base, iov[i].iov_len, et, SVM_Q_WAIT);
+					 iov[i].iov_base, iov[i].iov_len, et,
+					 1, SVM_Q_WAIT);
 		if (rc < 0) {
 			if (total > 0) {
 				break;
@@ -1102,7 +1131,9 @@ _spdk_vpp_app_attach(void)
 	bmp->client_index = g_svm.my_client_index;
 	bmp->context = ntohl(0xfeedface);
-	bmp->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_ADD_SEGMENT;
+	bmp->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_ACCEPT_REDIRECT;
+	bmp->options[APP_OPTIONS_FLAGS] |= APP_OPTIONS_FLAGS_ADD_SEGMENT;
 	bmp->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = 16;
 	bmp->options[APP_OPTIONS_RX_FIFO_SIZE] = fifo_size;
 	bmp->options[APP_OPTIONS_TX_FIFO_SIZE] = fifo_size;
@@ -1311,13 +1342,7 @@ spdk_vpp_net_framework_set_handlers(void)
 		vl_api_##n##_t_print, \
 		sizeof(vl_api_##n##_t), 1);
 	_(SESSION_ENABLE_DISABLE_REPLY, session_enable_disable_reply) \
-	_(BIND_SOCK_REPLY, bind_sock_reply) \
-	_(UNBIND_SOCK_REPLY, unbind_sock_reply) \
-	_(ACCEPT_SESSION, accept_session) \
-	_(CONNECT_SESSION_REPLY, connect_session_reply) \
-	_(DISCONNECT_SESSION, disconnect_session) \
 	_(DISCONNECT_SESSION_REPLY, disconnect_session_reply) \
-	_(RESET_SESSION, reset_session) \
 	_(APPLICATION_ATTACH_REPLY, application_attach_reply) \
 	_(APPLICATION_DETACH_REPLY, application_detach_reply) \
 	_(MAP_ANOTHER_SEGMENT, map_another_segment)

File 3 of 8:

@@ -179,7 +179,7 @@ if [ -d /usr/include/rbd ] && [ -d /usr/include/rados ] && [ $SPDK_TEST_RBD -eq
 fi
 if [ $SPDK_TEST_VPP -eq 1 ]; then
-	VPP_PATH="/usr/local/src/vpp/build-root/install-vpp_debug-native/vpp/"
+	VPP_PATH="/usr/local/src/vpp-19.04/build-root/install-vpp_debug-native/vpp/"
 	export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${VPP_PATH}/lib/
 	export PATH=${PATH}:${VPP_PATH}/bin/
 	config_params+=" --with-vpp=${VPP_PATH}"

File 4 of 8:

@@ -1,9 +1,9 @@
 diff --git a/Makefile b/Makefile
-index 900c1efb4..4889eefbe 100644
+index 8c7f3523f..b6a79529c 100644
 --- a/Makefile
 +++ b/Makefile
-@@ -92,9 +92,11 @@ RPM_DEPENDS += ninja-build
- RPM_DEPENDS += libuuid-devel
+@@ -90,10 +90,12 @@ RPM_DEPENDS += libuuid-devel
+ RPM_DEPENDS += mbedtls-devel
  ifeq ($(OS_ID),fedora)
 - RPM_DEPENDS += dnf-utils
@@ -12,8 +12,9 @@ index 900c1efb4..4889eefbe 100644
 + endif
  RPM_DEPENDS += subunit subunit-devel
 - RPM_DEPENDS += compat-openssl10-devel
+- RPM_DEPENDS += python2-devel python34-ply
 + RPM_DEPENDS += openssl-devel
- RPM_DEPENDS += python2-devel python2-ply
++ RPM_DEPENDS += python2-devel
  RPM_DEPENDS += python2-virtualenv
- RPM_DEPENDS += mbedtls-devel
+ RPM_DEPENDS += cmake
  RPM_DEPENDS_GROUPS = 'C Development Tools and Libraries'

File 5 of 8:

@@ -1,21 +1,23 @@
 diff --git a/Makefile b/Makefile
-index 900c1efb4..4a2aa231e 100644
+index 8c7f3523f..20814ee8d 100644
 --- a/Makefile
 +++ b/Makefile
-@@ -94,7 +94,7 @@ RPM_DEPENDS += libuuid-devel
+@@ -92,8 +92,8 @@ RPM_DEPENDS += mbedtls-devel
  ifeq ($(OS_ID),fedora)
  RPM_DEPENDS += dnf-utils
  RPM_DEPENDS += subunit subunit-devel
 - RPM_DEPENDS += compat-openssl10-devel
+- RPM_DEPENDS += python2-devel python34-ply
 + RPM_DEPENDS += openssl-devel
- RPM_DEPENDS += python2-devel python2-ply
++ RPM_DEPENDS += python2-devel
  RPM_DEPENDS += python2-virtualenv
- RPM_DEPENDS += mbedtls-devel
+ RPM_DEPENDS += cmake
  RPM_DEPENDS_GROUPS = 'C Development Tools and Libraries'
 diff --git a/build/external/packages/dpdk.mk b/build/external/packages/dpdk.mk
-index 6c46ac298..227a0772d 100644
+index a551151bb..b0258017a 100644
 --- a/build/external/packages/dpdk.mk
 +++ b/build/external/packages/dpdk.mk
-@@ -148,7 +148,7 @@ endif
+@@ -147,7 +147,7 @@ endif
  endif
  endif
@@ -24,51 +26,21 @@ index 6c46ac298..227a0772d 100644
  # assemble DPDK make arguments
  DPDK_MAKE_ARGS := -C $(DPDK_SOURCE) -j $(JOBS) \
-diff --git a/src/cmake/memfd.cmake b/src/cmake/memfd.cmake
-index ca499c459..f7eec2c10 100644
---- a/src/cmake/memfd.cmake
-+++ b/src/cmake/memfd.cmake
-@@ -24,3 +24,12 @@ if (HAVE_MEMFD_CREATE)
- add_definitions(-DHAVE_MEMFD_CREATE)
+diff --git a/src/plugins/crypto_ia32/CMakeLists.txt b/src/plugins/crypto_ia32/CMakeLists.txt
+index a100cdbb6..92e408098 100644
+--- a/src/plugins/crypto_ia32/CMakeLists.txt
++++ b/src/plugins/crypto_ia32/CMakeLists.txt
+@@ -22,3 +22,4 @@ add_vpp_plugin(crypto_ia32
+ )
+ target_compile_options(crypto_ia32_plugin PRIVATE "-march=silvermont")
++target_compile_options(crypto_ia32_plugin PRIVATE "-maes")
+diff --git a/src/plugins/crypto_ipsecmb/CMakeLists.txt b/src/plugins/crypto_ipsecmb/CMakeLists.txt
+index 0d08032c0..6a7eb148f 100644
+--- a/src/plugins/crypto_ipsecmb/CMakeLists.txt
++++ b/src/plugins/crypto_ipsecmb/CMakeLists.txt
+@@ -39,3 +39,4 @@ else()
  endif()
-+check_c_source_compiles("
-+  #define _GNU_SOURCE
-+  #include <sched.h>
-+  int main() { return getcpu (0, 0); }
-+" HAVE_GETCPU)
-+
-+if (HAVE_GETCPU)
-+  add_definitions(-DHAVE_GETCPU)
-+endif()
-diff --git a/src/vppinfra/linux/syscall.h b/src/vppinfra/linux/syscall.h
-index 1ae029d58..99d1a3ab6 100644
---- a/src/vppinfra/linux/syscall.h
-+++ b/src/vppinfra/linux/syscall.h
-@@ -19,11 +19,13 @@
- #include <unistd.h>
- #include <sys/syscall.h>
-+#ifndef HAVE_GETCPU
- static inline int
- getcpu (unsigned *cpu, unsigned *node, void *tcache)
- {
-   return syscall (__NR_getcpu, cpu, node, tcache);
- }
-+#endif
- static inline long
- set_mempolicy (int mode, const unsigned long *nodemask, unsigned long maxnode)
-diff --git a/src/vppinfra/pmalloc.c b/src/vppinfra/pmalloc.c
-index 365ee0443..ed1c0329f 100644
---- a/src/vppinfra/pmalloc.c
-+++ b/src/vppinfra/pmalloc.c
-@@ -53,7 +53,7 @@ pmalloc_validate_numa_node (u32 * numa_node)
-   if (*numa_node == CLIB_PMALLOC_NUMA_LOCAL)
-     {
-       u32 cpu;
--      if (getcpu (&cpu, numa_node, 0) != 0)
-+      if (getcpu (&cpu, numa_node) != 0)
-         return 1;
-     }
-   return 0;
+ target_compile_options(crypto_ipsecmb_plugin PRIVATE "-march=silvermont")
++target_compile_options(crypto_ipsecmb_plugin PRIVATE "-maes")
File 6 of 8:

@@ -1,13 +0,0 @@
-diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c
-index 392d694..e7efa77 100644
---- a/src/vnet/tcp/tcp_input.c
-+++ b/src/vnet/tcp/tcp_input.c
-@@ -1644,6 +1644,8 @@ tcp_handle_disconnects (tcp_worker_ctx_t * wrk)
-   for (i = 0; i < vec_len (pending_disconnects); i++)
-     {
-       tc = tcp_connection_get (pending_disconnects[i], thread_index);
-+      if (tc == NULL)
-+	continue;
-       tcp_disconnect_pending_off (tc);
-       session_transport_closing_notify (&tc->connection);
-     }

File 7 of 8:

@@ -237,18 +237,7 @@ function install_vpp()
 		fi
 	else
 		git clone "${GIT_REPO_VPP}"
-		git -C ./vpp checkout v19.01.1
-		git -C ./vpp cherry-pick 97dcf5bd26ca6de580943f5d39681f0144782c3d
-		git -C ./vpp cherry-pick f5dc9fbf814865b31b52b20f5bf959e9ff818b25
-		# Following patch for VPP is required due to the VPP tries to close
-		# connections to the non existing applications after timeout.
-		# It causes intermittent VPP application segfaults in our tests
-		# when few instances of VPP clients connects and disconnects several
-		# times.
-		# This workaround is only for VPP v19.01.1 and should be solved in
-		# the next release.
-		git -C ./vpp apply ${VM_SETUP_PATH}/patch/vpp/workaround-dont-notify-transport-closing.patch
+		git -C ./vpp checkout stable/1904
 		if [ "${OSID}" == 'fedora' ]; then
 			if [ ${OSVERSION} -eq 29 ]; then
@@ -264,7 +253,7 @@ function install_vpp()
 		make -C ./vpp build -j${jobs}
-		sudo mv ./vpp /usr/local/src/
+		sudo mv ./vpp /usr/local/src/vpp-19.04
 		fi
 	fi
 }

File 8 of 8:

@@ -132,7 +132,7 @@ function start_vpp() {
 	# Start VPP process in SPDK target network namespace
 	$TARGET_NS_CMD vpp \
 		unix { nodaemon cli-listen /run/vpp/cli.sock } \
-		dpdk { no-pci num-mbufs 128000 } \
+		dpdk { no-pci } \
 		session { evt_qs_memfd_seg } \
 		socksvr { socket-name /run/vpp-api.sock } \
 		plugins { \
plugins { \ plugins { \