OpenMPI 0.1.1
btl_sm_fifo.h
#ifndef MCA_BTL_SM_FIFO_H
#define MCA_BTL_SM_FIFO_H

#include "btl_sm.h"
#include "btl_sm_endpoint.h"

static void
add_pending(struct mca_btl_base_endpoint_t *ep, void *data, bool resend)
{
    int rc;
    btl_sm_pending_send_item_t *si;
    opal_free_list_item_t *i;
    OPAL_FREE_LIST_GET(&mca_btl_sm_component.pending_send_fl, i, rc);

    /* don't handle error for now */
    assert(i != NULL && rc == OMPI_SUCCESS);

    si = (btl_sm_pending_send_item_t *) i;
    si->data = data;

    OPAL_THREAD_ADD32(&mca_btl_sm_component.num_pending_sends, +1);

    /* if the data was already on a pending-send list then prepend it
     * to the list to minimize reordering */
    OPAL_THREAD_LOCK(&ep->endpoint_lock);
    if (resend)
        opal_list_prepend(&ep->pending_sends, (opal_list_item_t *) si);
    else
        opal_list_append(&ep->pending_sends, (opal_list_item_t *) si);
    OPAL_THREAD_UNLOCK(&ep->endpoint_lock);
}
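The pending-send item pulled from pending_send_fl above is a free-list item that carries the queued fragment pointer. A minimal sketch of its shape, assuming the layout implied by the casts above (the real definition lives in btl_sm.h):

/* sketch of the pending-send item; see btl_sm.h for the actual definition */
typedef struct btl_sm_pending_send_item_t {
    opal_free_list_item_t super;   /* free-list / list bookkeeping */
    void *data;                    /* the queued fragment header */
} btl_sm_pending_send_item_t;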

/*
 * FIFO_MAP(x) defines which FIFO on the receiver should be used
 * by sender rank x.  The map is a many-to-one hash.
 *
 * FIFO_MAP_NUM(n) defines how many FIFOs the receiver has for
 * n senders.
 *
 * That is, for all 0 <= x < n:
 *
 *     0 <= FIFO_MAP(x) < FIFO_MAP_NUM(n)
 *
 * For example, using a power-of-two nfifos, we could have
 *
 *     FIFO_MAP(x)     = x & (nfifos - 1)
 *     FIFO_MAP_NUM(n) = min(nfifos, n)
 *
 * Interesting limits include:
 *
 * nfifos very large: each sender has its own dedicated FIFO
 * on each receiver, and the receiver has one FIFO per sender.
 *
 * nfifos == 1: all senders use the same FIFO, and each receiver
 * has just one FIFO for all senders.
 */
#define FIFO_MAP(x) ((x) & (mca_btl_sm_component.nfifos - 1))
#define FIFO_MAP_NUM(n) \
    ((mca_btl_sm_component.nfifos) < (n) ? (mca_btl_sm_component.nfifos) : (n))
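A concrete reading of the map, with nfifos fixed at a hypothetical power-of-two value of 4; the component field is replaced by a local constant so the sketch is self-contained:

#include <stdio.h>

/* stand-in for mca_btl_sm_component.nfifos; illustration only */
#define EX_NFIFOS           4
#define EX_FIFO_MAP(x)      ((x) & (EX_NFIFOS - 1))
#define EX_FIFO_MAP_NUM(n)  ((EX_NFIFOS) < (n) ? (EX_NFIFOS) : (n))

int main(void)
{
    int n = 16;  /* number of local senders */
    /* senders 0, 4, 8, 12 share FIFO 0; senders 1, 5, 9, 13 share FIFO 1; ... */
    for (int x = 0; x < n; ++x) {
        printf("sender %2d -> FIFO %d of %d\n",
               x, EX_FIFO_MAP(x), EX_FIFO_MAP_NUM(n));
    }
    return 0;
}

With 16 senders and 4 FIFOs the map is 4-to-1; once n drops to 4 or fewer, every sender gets a dedicated FIFO.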


#define MCA_BTL_SM_FIFO_WRITE(endpoint_peer, my_smp_rank,               \
                              peer_smp_rank, hdr, resend,               \
                              retry_pending_sends, rc)                  \
do {                                                                    \
    sm_fifo_t *fifo =                                                   \
        &(mca_btl_sm_component.fifo[peer_smp_rank][FIFO_MAP(my_smp_rank)]); \
                                                                        \
    if ( retry_pending_sends ) {                                        \
        if ( 0 < opal_list_get_size(&endpoint_peer->pending_sends) ) {  \
            btl_sm_process_pending_sends(endpoint_peer);                \
        }                                                               \
    }                                                                   \
                                                                        \
    opal_atomic_lock(&(fifo->head_lock));                               \
    /* post fragment */                                                 \
    if ( sm_fifo_write(hdr, fifo) != OMPI_SUCCESS ) {                   \
        add_pending(endpoint_peer, hdr, resend);                        \
        rc = OMPI_ERR_RESOURCE_BUSY;                                    \
    } else {                                                            \
        MCA_BTL_SM_SIGNAL_PEER(endpoint_peer);                          \
        rc = OMPI_SUCCESS;                                              \
    }                                                                   \
    opal_atomic_unlock(&(fifo->head_lock));                             \
} while (0)
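A sketch of how a caller might post a fragment with this macro. The function name and its parameters are placeholders for the caller's state, not code taken from btl_sm.c:

/* hypothetical caller; identifiers are illustrative only */
static void example_post(struct mca_btl_base_endpoint_t *endpoint,
                         int my_smp_rank, int peer_smp_rank, void *hdr)
{
    int rc;
    /* first-time post: resend = false; retry queued traffic first */
    MCA_BTL_SM_FIFO_WRITE(endpoint, my_smp_rank, peer_smp_rank,
                          hdr, false, true, rc);
    if (OMPI_ERR_RESOURCE_BUSY == rc) {
        /* hdr was queued on endpoint->pending_sends by add_pending()
         * and will be re-posted on a later write attempt */
    }
}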

#endif
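For completeness, a sketch of the draining side that the macro calls into. The real btl_sm_process_pending_sends() is defined in btl_sm.c; this version only approximates it, and the my_smp_rank / peer_smp_rank endpoint fields are assumed from btl_sm_endpoint.h:

/* approximate drain loop; see btl_sm.c for the actual implementation */
static void example_process_pending_sends(struct mca_btl_base_endpoint_t *ep)
{
    while (0 < opal_list_get_size(&ep->pending_sends)) {
        btl_sm_pending_send_item_t *si;
        void *data;
        int rc;

        /* pop the oldest queued fragment under the endpoint lock */
        OPAL_THREAD_LOCK(&ep->endpoint_lock);
        si = (btl_sm_pending_send_item_t *)
            opal_list_remove_first(&ep->pending_sends);
        OPAL_THREAD_UNLOCK(&ep->endpoint_lock);
        if (NULL == si) {
            return;
        }

        data = si->data;
        OPAL_FREE_LIST_RETURN(&mca_btl_sm_component.pending_send_fl,
                              (opal_free_list_item_t *) si);
        OPAL_THREAD_ADD32(&mca_btl_sm_component.num_pending_sends, -1);

        /* re-post; resend = true so a failed write re-queues at the head,
         * retry_pending_sends = false to avoid recursing into this loop */
        MCA_BTL_SM_FIFO_WRITE(ep, ep->my_smp_rank, ep->peer_smp_rank,
                              data, true, false, rc);
        if (OMPI_SUCCESS != rc) {
            return;
        }
    }
}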