OpenMPI 0.1.1
btl_wv_endpoint.h
1 /*
2  * Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
3  * University Research and Technology
4  * Corporation. All rights reserved.
5  * Copyright (c) 2004-2006 The University of Tennessee and The University
6  * of Tennessee Research Foundation. All rights
7  * reserved.
8  * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
9  * University of Stuttgart. All rights reserved.
10  * Copyright (c) 2004-2005 The Regents of the University of California.
11  * All rights reserved.
12  * Copyright (c) 2007-2009 Cisco Systems, Inc. All rights reserved.
13  * Copyright (c) 2006-2007 Los Alamos National Security, LLC. All rights
14  * reserved.
15  * Copyright (c) 2006-2007 Voltaire All rights reserved.
16  * Copyright (c) 2007-2009 Mellanox Technologies. All rights reserved.
17  * Copyright (c) 2010 Oracle and/or its affiliates. All rights reserved.
18  * $COPYRIGHT$
19  *
20  * Additional copyrights may follow
21  *
22  * $HEADER$
23  */
24 
25 #ifndef MCA_BTL_IB_ENDPOINT_H
26 #define MCA_BTL_IB_ENDPOINT_H
27 
28 #include "opal/class/opal_list.h"
29 #include "opal/mca/event/event.h"
30 #include "opal/util/output.h"
31 #include "ompi/mca/btl/btl.h"
32 #include "btl_wv.h"
33 #include "btl_wv_frag.h"
34 #include "btl_wv_eager_rdma.h"
35 #include <errno.h>
36 #include <string.h>
37 #include "ompi/mca/btl/base/btl_base_error.h"
38 #include "connect/base.h"
39 
40 BEGIN_C_DECLS
41 
42 struct mca_btl_wv_frag_t;
43 struct mca_btl_wv_proc_modex_t;
44 
45 /**
46  * State of IB endpoint connection.
47  */
48 
49 typedef enum {
50  /* Defines the state in which this BTL instance
51  * has started the connection process */
52  MCA_BTL_IB_CONNECTING,
53 
54  /* Waiting for ack from endpoint */
55  MCA_BTL_IB_CONNECT_ACK,
56 
57  /* Waiting for final connection ACK from endpoint */
58  MCA_BTL_IB_WAITING_ACK,
59 
60  /* Connected ... both sender & receiver have
61  * buffers associated with this connection */
62  MCA_BTL_IB_CONNECTED,
63 
64  /* Connection is closed, there are no resources
65  * associated with this */
66  MCA_BTL_IB_CLOSED,
67 
68  /* Maximum number of retries have been used.
69  * Report failure on send to upper layer */
70  MCA_BTL_IB_FAILED
71 } mca_btl_wv_endpoint_state_t;
72 
73 typedef struct mca_btl_wv_rem_qp_info_t {
74  uint32_t rem_qp_num;
75  /* Remote QP number */
76  uint32_t rem_psn;
77  /* Remote process's port sequence number */
78 } mca_btl_wv_rem_qp_info_t;
79 
80 typedef struct mca_btl_wv_rem_srq_info_t {
81  /* Remote SRQ number */
82  uint32_t rem_srq_num;
83 } mca_btl_wv_rem_srq_info_t;
84 
85 typedef struct mca_btl_wv_rem_info_t {
86  /* Local identifier of the remote process */
87  uint16_t rem_lid;
88  /* subnet id of remote process */
89  uint64_t rem_subnet_id;
90  /* MTU of remote process */
91  uint32_t rem_mtu;
92  /* index of remote endpoint in endpoint array */
93  uint32_t rem_index;
94  /* Remote QPs */
95  mca_btl_wv_rem_qp_info_t *rem_qps;
96  /* Remote xrc_srq info, used only with XRC connections */
97  mca_btl_wv_rem_srq_info_t *rem_srqs;
98  /* Vendor id of remote HCA */
99  uint32_t rem_vendor_id;
100  /* Vendor part id of remote HCA */
101  uint32_t rem_vendor_part_id;
102  /* Transport type of remote port */
103  mca_btl_wv_transport_type_t rem_transport_type;
104 } mca_btl_wv_rem_info_t;
105 
106 
107 /**
108  * Aggregates all per peer qp info for an endpoint
109  */
110 typedef struct mca_btl_wv_endpoint_pp_qp_t {
111  int32_t sd_credits; /**< this rank's view of the credits
112  * available for sending:
113  * these are the credits granted by the
114  * remote peer, which correspond to the
115  * number of receive buffers posted remotely
116  */
117  int32_t rd_posted; /**< number of descriptors posted to the nic*/
118  int32_t rd_credits; /**< number of credits to return to peer */
119  int32_t cm_received; /**< Credit messages received */
120  int32_t cm_return; /**< how many credits to return */
121  int32_t cm_sent; /**< Outstanding number of credit messages */
122 } mca_btl_wv_endpoint_pp_qp_t;
123 
124 
125 /**
126  * Aggregates all srq qp info for an endpoint
127  */
128 typedef struct mca_btl_wv_endpoint_srq_qp_t {
129  int32_t dummy;
130 } mca_btl_wv_endpoint_srq_qp_t;
131 
132 typedef struct mca_btl_wv_qp_t {
133  struct wv_qp *lcl_qp;
134  uint32_t lcl_psn;
135  int32_t sd_wqe; /**< number of available send wqe entries */
136  int users;
137  opal_mutex_t lock;
138 } mca_btl_wv_qp_t;
139 
140 typedef struct mca_btl_wv_endpoint_qp_t {
141  mca_btl_wv_qp_t *qp;
142  opal_list_t no_credits_pending_frags[2]; /**< put fragments here if there are no credits
143  available */
144  opal_list_t no_wqe_pending_frags[2]; /**< put fragments here if there are no wqe
145  available */
146  int32_t rd_credit_send_lock; /**< Lock credit send fragment */
147  mca_btl_wv_send_control_frag_t *credit_frag;
148  size_t ib_inline_max; /**< max size of inline send */
149  union {
150  mca_btl_wv_endpoint_srq_qp_t srq_qp;
151  mca_btl_wv_endpoint_pp_qp_t pp_qp;
152  } u;
153 } mca_btl_wv_endpoint_qp_t;
154 
155 /**
156  * An abstraction that represents a connection to an endpoint process.
157  * An instance of mca_btl_base_endpoint_t is associated w/ each process
158  * and BTL pair at startup. However, connections to the endpoint
159  * are established dynamically on an as-needed basis:
160  */
161 
162 struct mca_btl_base_endpoint_t {
163  opal_list_item_t super;
164 
165  /** BTL module that created this connection */
166  struct mca_btl_wv_module_t *endpoint_btl;
167 
168  /** proc structure corresponding to endpoint */
169  struct mca_btl_wv_proc_t *endpoint_proc;
170 
171  /** local CPC to connect to this endpoint */
172  ompi_btl_wv_connect_base_module_t *endpoint_local_cpc;
173 
174  /** hook for local CPC to hang endpoint-specific data */
175  void *endpoint_local_cpc_data;
176 
177  /** If endpoint_local_cpc->cbm_uses_cts is true and this endpoint
178  is iWARP, then endpoint_initiator must be true on the side
179  that actually initiates the QP, false on the other side. This
180  bool is used to know which way to send the first CTS
181  message. */
182  bool endpoint_initiator;
183 
184  /** pointer to remote proc's CPC data (essentially its CPC modex
185  message) */
186  ompi_btl_wv_connect_base_module_data_t *endpoint_remote_cpc_data;
187 
188  /** current state of the connection */
189  mca_btl_wv_endpoint_state_t endpoint_state;
190 
191  /** number of connection retries attempted */
192  size_t endpoint_retries;
193 
194  /** timestamp of when the first connection was attempted */
195  double endpoint_tstamp;
196 
197  /** lock for concurrent access to endpoint state */
198  opal_mutex_t endpoint_lock;
199 
200  /** list of pending frags due to lazy connection establishment
201  for this endpoint */
202  opal_list_t pending_lazy_frags;
203 
204  mca_btl_wv_endpoint_qp_t *qps;
205 
206  /** list of pending rget ops */
207  opal_list_t pending_get_frags;
208  /** list of pending rput ops */
209  opal_list_t pending_put_frags;
210 
211  /** number of available get tokens */
212  int32_t get_tokens;
213 
214  /** subnet id of this endpoint */
215  uint64_t subnet_id;
216 
217  /** number of eager messages received */
218  int32_t eager_recv_count;
219  /** info about remote RDMA buffer */
220  mca_btl_wv_eager_rdma_remote_t eager_rdma_remote;
221  /** info about local RDMA buffer */
222  mca_btl_wv_eager_rdma_local_t eager_rdma_local;
223  /** index of the endpoint in endpoints array */
224  int32_t index;
225 
226  /** does the endpoint require network byte ordering? */
227  bool nbo;
228  /** use eager rdma for this peer? */
229  bool use_eager_rdma;
230 
231  /** information about the remote port */
232  mca_btl_wv_rem_info_t rem_info;
233 
234  /** Frag for initial wireup CTS protocol; will be NULL if CPC
235  indicates that it does not want to use CTS */
236  mca_btl_wv_recv_frag_t endpoint_cts_frag;
237  /** Memory registration info for the CTS frag */
238  struct wv_mr *endpoint_cts_mr;
239 
240  /** Whether we've posted receives on this EP or not (only used in
241  CTS protocol) */
242  bool endpoint_posted_recvs;
243 
244  /** Whether we've received the CTS from the peer or not (only used
245  in CTS protocol) */
246  bool endpoint_cts_received;
247 
248  /** Whether we've sent the CTS to the peer or not (only used in
249  CTS protocol) */
250  bool endpoint_cts_sent;
251 };
252 
253 typedef struct mca_btl_base_endpoint_t mca_btl_wv_endpoint_t;
254 OBJ_CLASS_DECLARATION(mca_btl_wv_endpoint_t);
255 
257 
258 static inline int32_t qp_get_wqe(mca_btl_wv_endpoint_t *ep, const int qp)
259 {
260  return OPAL_THREAD_ADD32(&ep->qps[qp].qp->sd_wqe, -1);
261 }
262 
263 static inline int32_t qp_put_wqe(mca_btl_wv_endpoint_t *ep, const int qp)
264 {
265  return OPAL_THREAD_ADD32(&ep->qps[qp].qp->sd_wqe, 1);
266 }
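qp_get_wqe()/qp_put_wqe() implement simple send work-queue-entry accounting: a sender reserves a WQE by decrementing sd_wqe and must give the reservation back if it fails (or when the send completes). A minimal caller sketch is shown below; the try_post() helper and the choice of pending-frag list index are illustrative assumptions, not part of this header.

    /* Illustrative sketch only -- not part of this header. */
    static int try_post(mca_btl_wv_endpoint_t *ep, mca_btl_wv_send_frag_t *frag,
                        const int qp)
    {
        if (qp_get_wqe(ep, qp) < 0) {
            /* No send WQE available: undo the reservation and park the
             * fragment until a completion returns a WQE. */
            qp_put_wqe(ep, qp);
            opal_list_append(&ep->qps[qp].no_wqe_pending_frags[0],
                             (opal_list_item_t *) frag);
            return OMPI_ERR_OUT_OF_RESOURCE;
        }
        return post_send(ep, frag, false);   /* post_send() is defined below */
    }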
267 
268 int mca_btl_wv_endpoint_send(mca_btl_base_endpoint_t*,
269  mca_btl_wv_send_frag_t*);
270 int mca_btl_wv_endpoint_post_send(mca_btl_wv_endpoint_t*,
271  mca_btl_wv_send_frag_t*);
272 void mca_btl_wv_endpoint_send_credits(mca_btl_base_endpoint_t*, const int);
273 void mca_btl_wv_endpoint_connect_eager_rdma(mca_btl_wv_endpoint_t*);
274 int mca_btl_wv_endpoint_post_recvs(mca_btl_wv_endpoint_t*);
275 void mca_btl_wv_endpoint_send_cts(mca_btl_wv_endpoint_t *endpoint);
276 void mca_btl_wv_endpoint_cpc_complete(mca_btl_wv_endpoint_t*);
277 void mca_btl_wv_endpoint_connected(mca_btl_wv_endpoint_t*);
278 void mca_btl_wv_endpoint_init(mca_btl_wv_module_t*,
279  mca_btl_base_endpoint_t*,
280  ompi_btl_wv_connect_base_module_t *local_cpc,
281  struct mca_btl_wv_proc_modex_t *remote_proc_info,
282  ompi_btl_wv_connect_base_module_data_t *remote_cpc_data);
283 
284 /*
285  * Invoke an error on the btl associated with an endpoint. If we
286  * don't have an endpoint, then just use the first one on the
287  * component list of BTLs.
288  */
289 void *mca_btl_wv_endpoint_invoke_error(void *endpoint);
290 
291 static inline int post_recvs(mca_btl_base_endpoint_t *ep, const int qp,
292  const int num_post)
293 {
294  int i;
295  struct wv_recv_wr *wr = NULL;
296  mca_btl_wv_module_t *wv_btl = ep->endpoint_btl;
297  HRESULT hr = 0;
298  if(0 == num_post)
299  return OMPI_SUCCESS;
300  for(i = 0; i < num_post; i++) {
301  int rc;
302  ompi_free_list_item_t* item;
303  OMPI_FREE_LIST_WAIT(&wv_btl->device->qps[qp].recv_free, item, rc);
304  to_base_frag(item)->base.order = qp;
305  to_com_frag(item)->endpoint = ep;
306  wr = &to_recv_frag(item)->rd_desc;
307  hr = ep->qps[qp].qp->lcl_qp->handle->PostReceive(wr->wr_id,wr->sg_list,
308  wr->num_sge);
309  }
310  if(SUCCEEDED(hr)) {
311  return OMPI_SUCCESS;
312  }else {
313  BTL_ERROR(("posting receive on qp %d", qp));
314  return OMPI_ERROR;
315  }
316 }
317 
318 static inline int mca_btl_wv_endpoint_post_rr_nolock(
319  mca_btl_base_endpoint_t *ep, const int qp)
320 {
321  int rd_rsv = mca_btl_wv_component.qp_infos[qp].u.pp_qp.rd_rsv;
322  int rd_num = mca_btl_wv_component.qp_infos[qp].rd_num;
323  int rd_low = mca_btl_wv_component.qp_infos[qp].rd_low;
324  int cqp = mca_btl_wv_component.credits_qp, rc;
325  int cm_received = 0, num_post = 0;
326 
327  assert(BTL_WV_QP_TYPE_PP(qp));
328 
329  if(ep->qps[qp].u.pp_qp.rd_posted <= rd_low)
330  num_post = rd_num - ep->qps[qp].u.pp_qp.rd_posted;
331 
332  assert(num_post >= 0);
333 
334  if(ep->qps[qp].u.pp_qp.cm_received >= (rd_rsv >> 2))
335  cm_received = ep->qps[qp].u.pp_qp.cm_received;
336 
337  if((rc = post_recvs(ep, qp, num_post)) != OMPI_SUCCESS) {
338  return rc;
339  }
340  OPAL_THREAD_ADD32(&ep->qps[qp].u.pp_qp.rd_posted, num_post);
341  OPAL_THREAD_ADD32(&ep->qps[qp].u.pp_qp.rd_credits, num_post);
342 
343  /* post buffers for credit management on credit management qp */
344  if((rc = post_recvs(ep, cqp, cm_received)) != OMPI_SUCCESS) {
345  return rc;
346  }
347  OPAL_THREAD_ADD32(&ep->qps[qp].u.pp_qp.cm_return, cm_received);
348  OPAL_THREAD_ADD32(&ep->qps[qp].u.pp_qp.cm_received, -cm_received);
349 
350  assert(ep->qps[qp].u.pp_qp.rd_credits <= rd_num &&
351  ep->qps[qp].u.pp_qp.rd_credits >= 0);
352 
353  return OMPI_SUCCESS;
354 }
355 
356 static inline int mca_btl_wv_endpoint_post_rr(
357  mca_btl_base_endpoint_t *ep, const int qp)
358 {
359  int ret;
360  OPAL_THREAD_LOCK(&ep->endpoint_lock);
361  ret = mca_btl_wv_endpoint_post_rr_nolock(ep, qp);
362  OPAL_THREAD_UNLOCK(&ep->endpoint_lock);
363  return ret;
364 }
365 
366 #define BTL_WV_CREDITS_SEND_TRYLOCK(E, Q) \
367  OPAL_ATOMIC_CMPSET_32(&(E)->qps[(Q)].rd_credit_send_lock, 0, 1)
368 #define BTL_WV_CREDITS_SEND_UNLOCK(E, Q) \
369  OPAL_ATOMIC_CMPSET_32(&(E)->qps[(Q)].rd_credit_send_lock, 1, 0)
370 #define BTL_WV_GET_CREDITS(FROM, TO) \
371  do { \
372  TO = FROM; \
373  } while(0 == OPAL_ATOMIC_CMPSET_32(&FROM, TO, 0))
374 
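These macros guard credit-return traffic: BTL_WV_CREDITS_SEND_TRYLOCK/UNLOCK form a one-word lock around the per-QP credit fragment, and BTL_WV_GET_CREDITS atomically drains a counter into a local variable by retrying the compare-and-swap until it succeeds in replacing the observed value with 0. A hedged sketch of how they could be combined when building a credit message follows; flush_credits() and the hdr->credits field are illustrative assumptions.

    /* Illustrative sketch only -- not part of this header. */
    static void flush_credits(mca_btl_wv_endpoint_t *ep, const int qp,
                              mca_btl_wv_header_t *hdr)
    {
        int32_t credits;
        if (BTL_WV_CREDITS_SEND_TRYLOCK(ep, qp)) {
            /* Atomically claim all receive credits accumulated for this QP
             * and advertise them in the outgoing header. */
            BTL_WV_GET_CREDITS(ep->qps[qp].u.pp_qp.rd_credits, credits);
            hdr->credits = credits;   /* assumed header field */
            BTL_WV_CREDITS_SEND_UNLOCK(ep, qp);
        }
    }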
375 
376 static inline bool check_eager_rdma_credits(const mca_btl_wv_endpoint_t *ep)
377 {
378  return (ep->eager_rdma_local.credits > ep->eager_rdma_local.rd_win) ? true :
379  false;
380 }
381 
382 static inline bool
383 check_send_credits(const mca_btl_wv_endpoint_t *ep, const int qp)
384 {
385 
386  if(!BTL_WV_QP_TYPE_PP(qp))
387  return false;
388 
389  return (ep->qps[qp].u.pp_qp.rd_credits >=
390  mca_btl_wv_component.qp_infos[qp].u.pp_qp.rd_win) ? true : false;
391 }
392 
393 static inline void send_credits(mca_btl_wv_endpoint_t *ep, int qp)
394 {
395  if(BTL_WV_QP_TYPE_PP(qp)) {
396  if(check_send_credits(ep, qp))
397  goto try_send;
398  } else {
399  qp = mca_btl_wv_component.credits_qp;
400  }
401 
402  if(!check_eager_rdma_credits(ep))
403  return;
404 
405 try_send:
406  if(BTL_WV_CREDITS_SEND_TRYLOCK(ep, qp))
407  mca_btl_wv_endpoint_send_credits(ep, qp);
408 }
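Taken together, mca_btl_wv_endpoint_post_rr() and send_credits() form the receive-side replenish path: after a fragment has been consumed, the receive queue is topped back up and, once enough credits have accumulated (rd_win, or the eager-RDMA window), a credit update is pushed back to the peer. A hypothetical completion-handler sketch, assuming the types from this header:

    /* Illustrative sketch only -- not part of this header. */
    static void on_recv_completion(mca_btl_wv_endpoint_t *ep, const int qp)
    {
        /* ... deliver the fragment to the upper layer ... */

        /* Repost receive buffers if we dropped below rd_low, accumulating
         * credits to return to the peer. */
        (void) mca_btl_wv_endpoint_post_rr(ep, qp);

        /* Send a credit update if enough credits (or eager-RDMA credits)
         * have accumulated. */
        send_credits(ep, qp);
    }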
409 
410 static inline int check_endpoint_state(mca_btl_wv_endpoint_t *ep,
411  mca_btl_base_descriptor_t *des, opal_list_t *pending_list)
412 {
413  int rc = OMPI_ERR_RESOURCE_BUSY;
414 
415  switch(ep->endpoint_state) {
416  case MCA_BTL_IB_CLOSED:
417  rc = ep->endpoint_local_cpc->cbm_start_connect(ep->endpoint_local_cpc, ep);
418  if (OMPI_SUCCESS == rc) {
419  rc = OMPI_ERR_RESOURCE_BUSY;
420  }
421  /*
422  * As long as we expect a message from the peer (in order
423  * to set up the connection) let the event engine poll for
424  * OOB events. Note: we increment the event users count
425  * once per active peer connection.
426  */
427  opal_progress_event_users_increment();
428  /* fall through */
429  default:
430  opal_list_append(pending_list, (opal_list_item_t *)des);
431  break;
432  case MCA_BTL_IB_FAILED:
433  rc = OMPI_ERR_UNREACH;
434  break;
435  case MCA_BTL_IB_CONNECTED:
436  rc = OMPI_SUCCESS;
437  break;
438  }
439 
440  return rc;
441 }
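check_endpoint_state() is meant to be called with endpoint_lock held: it kicks off a lazy connection on a CLOSED endpoint, queues the descriptor on the supplied pending list while the connection is being established, and only returns OMPI_SUCCESS once the endpoint is CONNECTED. A hypothetical caller sketch (send_or_queue() is an illustrative name, not part of this header):

    /* Illustrative sketch only -- not part of this header. */
    static int send_or_queue(mca_btl_wv_endpoint_t *ep,
                             mca_btl_wv_send_frag_t *frag)
    {
        int rc;
        OPAL_THREAD_LOCK(&ep->endpoint_lock);
        rc = check_endpoint_state(ep, &to_base_frag(frag)->base,
                                  &ep->pending_lazy_frags);
        OPAL_THREAD_UNLOCK(&ep->endpoint_lock);
        if (OMPI_SUCCESS != rc) {
            /* Either queued until the connection completes
             * (OMPI_ERR_RESOURCE_BUSY) or the peer is unreachable. */
            return rc;
        }
        return post_send(ep, frag, false);
    }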
442 
443 static inline __opal_attribute_always_inline__ int
444 ib_send_flags(uint32_t size, mca_btl_wv_endpoint_qp_t *qp)
445 {
446  return WV_SEND_SIGNALED |
447  ((size <= qp->ib_inline_max) ? WV_SEND_INLINE : 0);
448 }
449 
450 static inline int
451 acquire_eager_rdma_send_credit(mca_btl_wv_endpoint_t *endpoint)
452 {
453  if(OPAL_THREAD_ADD32(&endpoint->eager_rdma_remote.tokens, -1) < 0) {
454  OPAL_THREAD_ADD32(&endpoint->eager_rdma_remote.tokens, 1);
455  return OMPI_ERR_OUT_OF_RESOURCE;
456  }
457 
458  return OMPI_SUCCESS;
459 }
460 
461 #define ntohll(x) (((_int64)(ntohl((int)((x << 32) >> 32))) << 32)|(unsigned int)ntohl(((int)(x >> 32))))
462 #define htonll(x) ntohll(x)
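ntohll()/htonll() byte-swap a 64-bit value by splitting it into 32-bit halves, swapping each half with ntohl(), and exchanging the halves. A small illustrative snippet (assuming <stdint.h> for uint64_t) shows the effect on a little-endian Windows/x86 host:

    /* Illustrative sketch only -- not part of this header. */
    static void byte_order_example(void)
    {
        uint64_t host_val = 0x0102030405060708ULL;
        uint64_t wire_val = htonll(host_val);
        /* On a little-endian host wire_val == 0x0807060504030201ULL,
         * and ntohll(wire_val) recovers host_val. */
        (void) wire_val;
    }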
463 
464 static inline int post_send(mca_btl_wv_endpoint_t *ep,
465  mca_btl_wv_send_frag_t *frag, const bool rdma)
466 {
467  mca_btl_wv_module_t *wv_btl = ep->endpoint_btl;
468  mca_btl_base_segment_t *seg = &to_base_frag(frag)->segment;
469  WV_SGE *sg = &to_com_frag(frag)->sg_entry;
470  WV_SEND_REQUEST *sr_desc = &to_out_frag(frag)->sr_desc;
471  WV_SEND_REQUEST *bad_wr;
472  HRESULT hr = 0;
473  int qp = to_base_frag(frag)->base.order;
474  sg->Length = seg->seg_len + sizeof(mca_btl_wv_header_t) +
475  (rdma ? sizeof(mca_btl_wv_footer_t) : 0) + frag->coalesced_length;
476  sr_desc->Flags = ib_send_flags(sg->Length, &(ep->qps[qp]));
477  if(ep->nbo)
478  BTL_WV_HEADER_HTON(*frag->hdr);
479  if(rdma) {
480  int32_t head;
481  mca_btl_wv_footer_t* ftr =
482  (mca_btl_wv_footer_t*)(((char*)frag->hdr) + sg->Length -
483  sizeof(mca_btl_wv_footer_t));
484  sr_desc->Opcode = WvRdmaWrite;
485  MCA_BTL_WV_RDMA_FRAG_SET_SIZE(ftr, sg->Length);
486  MCA_BTL_WV_RDMA_MAKE_LOCAL(ftr);
487  if(ep->nbo)
488  BTL_WV_FOOTER_HTON(*ftr);
489  sr_desc->Wr.Rdma.Rkey = htonl(ep->eager_rdma_remote.rkey);
490  MCA_BTL_WV_RDMA_MOVE_INDEX(ep->eager_rdma_remote.head, head);
491  sr_desc->Wr.Rdma.RemoteAddress =
492  ep->eager_rdma_remote.base.lval +
493  head * wv_btl->eager_rdma_frag_size +
494  sizeof(mca_btl_wv_header_t) +
495  mca_btl_wv_component.eager_limit +
496  sizeof(mca_btl_wv_footer_t);
497  sr_desc->Wr.Rdma.RemoteAddress = htonll(sr_desc->Wr.Rdma.RemoteAddress - sg->Length);
498  } else {
499  if(BTL_WV_QP_TYPE_PP(qp)) {
500  sr_desc->Opcode = WvSend;
501  } else {
502  sr_desc->Opcode = WvSend;
503  sr_desc->Flags |= WV_SEND_IMMEDIATE;
504  sr_desc->ImmediateData = ep->rem_info.rem_index;
505  }
506  }
507  assert(sg->pAddress == (void*)(uintptr_t)frag->hdr);
508  hr = ep->qps[qp].qp->lcl_qp->handle->PostSend(sr_desc,
509  (WV_SEND_REQUEST**)&bad_wr);
510  if(SUCCEEDED(hr))
511  return 0;
512  else
513  return 1;
514 }
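In the eager-RDMA branch above, the target address is chosen so that the outgoing message ends flush with the end of slot "head" in the peer's eager-RDMA buffer:

    RemoteAddress = base.lval
                  + head * eager_rdma_frag_size
                  + sizeof(mca_btl_wv_header_t) + eager_limit + sizeof(mca_btl_wv_footer_t)
                  - sg->Length

Right-justifying the data in the slot keeps the footer at a fixed offset regardless of payload size, so the receiver can detect arrival by polling a single footer location per slot.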
515 
516 END_C_DECLS
517 
518 #endif