/* $Id$ */
/*
- * Copyright (c) 2008 Stanford University.
+ * Copyright (c) 2008-9 Stanford University.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*/
/**
- * This component contains the forwarding path
- * of the standard CTP implementation packaged with
- * TinyOS 2.x. The CTP specification can be found in TEP 123.
- * The paper entitled "Collection Tree Protocol," by Omprakash
- * Gnawali et al., in SenSys 2009, describes the implementation and
- * provides detailed performance results.</p>
+ * This component contains the forwarding path of CTP Noe, the
+ * standard CTP implementation packaged with TinyOS 2.x. The CTP
+ * specification can be found in TEP 123. The paper entitled
+ * "Collection Tree Protocol," by Omprakash Gnawali et al., in SenSys
+ * 2009, describes the implementation and provides detailed
+ * performance results of CTP Noe.</p>
*
* <p>The CTP ForwardingEngine is responsible for queueing and
* scheduling outgoing packets. It maintains a pool of forwarding
* C</i>. This implementation has several configuration constants, which
* can be found in <code>ForwardingEngine.h</code>.</p>
*
- * <p>Packets in the send queue are sent in FIFO order, with head-of-line
- * blocking. Because this is a tree collection protocol, all packets are going
- * to the same destination, and so the ForwardingEngine does not distinguish
- * packets from one another. Packets from CollectionSenderC clients are
- * sent identically to forwarded packets: only their buffer handling is
- different.</p>
+ * <p>Packets in the send queue are sent in FIFO order, with
+ * head-of-line blocking. Because this is a tree collection protocol,
+ * all packets are going to the same destination, and so the
+ * ForwardingEngine does not distinguish packets from one
+ * another. Packets from CollectionSenderC clients are sent
+ * identically to forwarded packets: only their buffer handling is
+ * different.</p>
*
* <p>If ForwardingEngine is on top of a link layer that supports
* synchronous acknowledgments, it enables them and retransmits packets
* up to MAX_RETRIES times before giving up and dropping the
* packet. MAX_RETRIES is typically a
* large number (e.g., >20), as this implementation assumes there is
* link layer feedback on failed packets, such that link costs will go
- * up and cause the routing layer to pick a next hop.</p>
+ * up and cause the routing layer to pick a next hop. If the underlying
+ * link layer does not support acknowledgments, ForwardingEngine sends
+ * a packet only once.</p>
*
* <p>The ForwardingEngine detects routing loops and tries to correct
- * them. Routing is in terms of a cost gradient, where the collection root
- * has a cost of zero and a node's cost is the cost of its next hop plus
- * the cost of the link to that next hop.
- * If there are no loops, then this gradient value decreases monotonically
- * along a route. When the ForwardingEngine
- * sends a packet to the next hop, it puts the local gradient value in
- * the packet header. If a node receives a packet to forward whose
- * gradient value is less than its own, then the gradient is not monotonically
- * decreasing and there may be a routing loop. When the ForwardingEngine
- * receives such a packet, it tells the RoutingEngine to advertise its
- * gradient value soon, with the hope that the advertisement will update
- * the node who just sent a packet and break the loop. It also pauses the
- * before the next packet transmission, in hopes of giving the routing layer's
- * packet a priority.</p>
+ * them. Routing is in terms of a cost gradient, where the collection
+ * root has a cost of zero and a node's cost is the cost of its next
+ * hop plus the cost of the link to that next hop. If there are no
+ * loops, then this gradient value decreases monotonically along a
+ * route. When the ForwardingEngine sends a packet to the next hop,
+ * it puts the local gradient value in the packet header. If a node
+ * receives a packet to forward whose gradient value is less than its
+ * own, then the gradient is not monotonically decreasing and there
+ * may be a routing loop. When the ForwardingEngine receives such a
+ * packet, it tells the RoutingEngine to advertise its gradient value
+ * soon, with the hope that the advertisement will update the node
+ * who just sent a packet and break the loop. It also pauses briefly
+ * before the next packet transmission, in hopes of giving the
+ * routing layer's packet a priority.</p>
*
- * <p>ForwardingEngine times its packet transmissions. It differentiates
- * between four transmission cases: forwarding, success, ack failure,
- * and loop detection. In each case, the
- * ForwardingEngine waits a randomized period of time before sending the next
- * packet. This approach assumes that the network is operating at low
- * utilization; its goal is to prevent correlated traffic -- such as
- * nodes along a route forwarding packets -- from interfering with itself.</p>
+ * <p>ForwardingEngine times its packet transmissions. It
+ * differentiates between four transmission cases: forwarding,
+ * success, ack failure, and loop detection. In each case, the
+ * ForwardingEngine waits a randomized period of time before sending
+ * the next packet. This approach assumes that the network is
+ * operating at low utilization; its goal is to prevent correlated
+ * traffic -- such as nodes along a route forwarding packets -- from
+ * interfering with itself.</p>
*
- * <p>While this implementation can work on top of a variety of link estimators,
- * it is designed to work with a 4-bit link estimator (4B). Details on 4B can
- * be found in the HotNets paper "Four Bit Link Estimation" by Rodrigo Fonseca
- * et al. The forwarder provides the "ack" bit for each sent packet, telling the
- * estimator whether the packet was acknowledged.</p>
+ * <p>While this implementation can work on top of a variety of link
+ * estimators, it is designed to work with a 4-bit link estimator
+ * (4B). Details on 4B can be found in the HotNets paper "Four Bit
+ * Link Estimation" by Rodrigo Fonseca et al. The forwarder provides
+ * the "ack" bit for each sent packet, telling the estimator whether
+ * the packet was acknowledged.</p>
*
* @author Philip Levis
* @author Kyle Jamieson
// Start with all states false
uint8_t forwardingState = 0;
- /* Keep track of the last parent address we sent to, so that
- unacked packets to an old parent are not incorrectly attributed
- to a new parent. */
- am_addr_t lastParent;
-
/* Network-level sequence number, so that receivers
* can distinguish retransmissions from different packets. */
uint8_t seqno;
int i;
for (i = 0; i < CLIENT_COUNT; i++) {
clientPtrs[i] = clientEntries + i;
- dbg("Forwarder", "clientPtrs[%hhu] = %p\n", i, clientPtrs[i]);
+ dbg("CtpForwarder", "clientPtrs[%hhu] = %p\n", i, clientPtrs[i]);
}
loopbackMsgPtr = &loopbackMsg;
- lastParent = call AMPacket.address();
seqno = 0;
return SUCCESS;
}
/* sendTask is where the first phase of all send logic
* exists (the second phase is in SubSend.sendDone()). */
task void sendTask();
+
+ /* sendComplete is called by sendDone when it is done
+ * with a packet (either due to too many retransmissions or
+ * an acknowledgment). It frees memory appropriately and
+ * cleans up the sending state */
+ void sendComplete(fe_queue_entry_t* qe, message_t* msg, bool success);
/* ForwardingEngine keeps track of whether the underlying
radio is powered on. If not, it enqueues packets;
if (err == SUCCESS) {
setState(RADIO_ON);
if (!call SendQueue.empty()) {
- dbg("FHangBug", "%s posted sendTask.\n", __FUNCTION__);
post sendTask();
}
}
r %= window;
r += offset;
call RetxmitTimer.startOneShot(r);
- dbg("Forwarder", "Rexmit timer will fire in %hu ms\n", r);
+ dbg("CtpForwarder", "Rexmit timer will fire in %hu ms\n", r);
}
/*
* sending packets.
*/
event void UnicastNameFreeRouting.routeFound() {
- dbg("FHangBug", "%s posted sendTask.\n", __FUNCTION__);
post sendTask();
}
command error_t Send.send[uint8_t client](message_t* msg, uint8_t len) {
ctp_data_header_t* hdr;
fe_queue_entry_t *qe;
- dbg("Forwarder", "%s: sending packet from client %hhu: %x, len %hhu\n", __FUNCTION__, client, msg, len);
+ dbg("CtpForwarder", "%s: sending packet from client %hhu: %x, len %hhu\n", __FUNCTION__, client, msg, len);
if (!hasState(ROUTING_ON)) {return EOFF;}
if (len > call Send.maxPayloadLength[client]()) {return ESIZE;}
hdr->thl = 0;
if (clientPtrs[client] == NULL) {
- dbg("Forwarder", "%s: send failed as client is busy.\n", __FUNCTION__);
+ dbg("CtpForwarder", "%s: send failed as client is busy.\n", __FUNCTION__);
return EBUSY;
}
qe->msg = msg;
qe->client = client;
qe->retries = MAX_RETRIES;
- dbg("Forwarder", "%s: queue entry for %hhu is %hhu deep\n", __FUNCTION__, client, call SendQueue.size());
+ dbg("CtpForwarder", "%s: queue entry for %hhu is %hhu deep\n", __FUNCTION__, client, call SendQueue.size());
if (call SendQueue.enqueue(qe) == SUCCESS) {
if (hasState(RADIO_ON) && !hasState(SENDING)) {
- dbg("FHangBug", "%s posted sendTask.\n", __FUNCTION__);
post sendTask();
}
clientPtrs[client] = NULL;
return SUCCESS;
}
else {
- dbg("Forwarder",
+ dbg("CtpForwarder",
"%s: send failed as packet could not be enqueued.\n",
__FUNCTION__);
- // send a debug message to the uart
call CollectionDebug.logEvent(NET_C_FE_SEND_QUEUE_FULL);
// Return the pool entry, as it's not for me...
task void sendTask() {
uint16_t gradient;
- dbg("Forwarder", "%s: Trying to send a packet. Queue size is %hhu.\n", __FUNCTION__, call SendQueue.size());
+ dbg("CtpForwarder", "%s: Trying to send a packet. Queue size is %hhu.\n", __FUNCTION__, call SendQueue.size());
if (hasState(SENDING) || call SendQueue.empty()) {
call CollectionDebug.logEvent(NET_C_FE_SENDQUEUE_EMPTY);
return;
* is lost (e.g., a bug in the routing engine), we retry.
* Otherwise the forwarder might hang indefinitely. As this test
* doesn't require radio activity, the energy cost is minimal. */
- dbg("Forwarder", "%s: no route, don't send, try again in %i.\n", __FUNCTION__, NO_ROUTE_RETRY);
+ dbg("CtpForwarder", "%s: no route, don't send, try again in %i.\n", __FUNCTION__, NO_ROUTE_RETRY);
call RetxmitTimer.startOneShot(NO_ROUTE_RETRY);
call CollectionDebug.logEvent(NET_C_FE_NO_ROUTE);
return;
}
// Not a duplicate: we've decided we're going to send.
- dbg("Forwarder", "Sending queue entry %p\n", qe);
+ dbg("CtpForwarder", "Sending queue entry %p\n", qe);
if (call RootControl.isRoot()) {
/* Code path for roots: copy the packet and signal receive. */
payload = call Packet.getPayload(loopbackMsgPtr, call Packet.payloadLength(loopbackMsgPtr));
payloadLength = call Packet.payloadLength(loopbackMsgPtr);
- dbg("Forwarder", "%s: I'm a root, so loopback and signal receive.\n", __FUNCTION__);
+ dbg("CtpForwarder", "%s: I'm a root, so loopback and signal receive.\n", __FUNCTION__);
loopbackMsgPtr = signal Receive.receive[collectid](loopbackMsgPtr,
payload,
payloadLength);
if (subsendResult == SUCCESS) {
// Successfully submitted to the data-link layer.
setState(SENDING);
- dbg("Forwarder", "%s: subsend succeeded with %p.\n", __FUNCTION__, qe->msg);
+ dbg("CtpForwarder", "%s: subsend succeeded with %p.\n", __FUNCTION__, qe->msg);
return;
}
// The packet is too big: truncate it and retry.
else if (subsendResult == ESIZE) {
- dbg("Forwarder", "%s: subsend failed from ESIZE: truncate packet.\n", __FUNCTION__);
+ dbg("CtpForwarder", "%s: subsend failed from ESIZE: truncate packet.\n", __FUNCTION__);
call Packet.setPayloadLength(qe->msg, call Packet.maxPayloadLength());
post sendTask();
call CollectionDebug.logEvent(NET_C_FE_SUBSEND_SIZE);
}
else {
- dbg("Forwarder", "%s: subsend failed from %i\n", __FUNCTION__, (int)subsendResult);
+ dbg("CtpForwarder", "%s: subsend failed from %i\n", __FUNCTION__, (int)subsendResult);
}
}
}
event void SubSend.sendDone(message_t* msg, error_t error) {
fe_queue_entry_t *qe = call SendQueue.head();
- dbg("Forwarder", "%s to %hu and %hhu\n", __FUNCTION__, call AMPacket.destination(msg), error);
+ dbg("CtpForwarder", "%s to %hu and %hhu\n", __FUNCTION__, call AMPacket.destination(msg), error);
if (error != SUCCESS) {
/* The radio wasn't able to send the packet: retransmit it. */
- dbg("Forwarder", "%s: send failed\n", __FUNCTION__);
+ dbg("CtpForwarder", "%s: send failed\n", __FUNCTION__);
call CollectionDebug.logEventMsg(NET_C_FE_SENDDONE_FAIL,
call CollectionPacket.getSequenceNumber(msg),
call CollectionPacket.getOrigin(msg),
startRetxmitTimer(SENDDONE_FAIL_WINDOW, SENDDONE_FAIL_OFFSET);
}
else if (hasState(ACK_PENDING) && !call PacketAcknowledgements.wasAcked(msg)) {
- /* Retransmission for unacked packet. Might drop the packet. */
+ /* No ack: if countdown is not 0, retransmit, else drop the packet. */
call LinkEstimator.txNoAck(call AMPacket.destination(msg));
call CtpInfo.recomputeRoutes();
if (--qe->retries) {
- dbg("Forwarder", "%s: not acked\n", __FUNCTION__);
+ dbg("CtpForwarder", "%s: not acked, retransmit\n", __FUNCTION__);
call CollectionDebug.logEventMsg(NET_C_FE_SENDDONE_WAITACK,
call CollectionPacket.getSequenceNumber(msg),
call CollectionPacket.getOrigin(msg),
call AMPacket.destination(msg));
startRetxmitTimer(SENDDONE_NOACK_WINDOW, SENDDONE_NOACK_OFFSET);
} else {
- // <Max retries reached, dropping packet: first case is a client packet,
- // second case is a forwarded packet. Memory management for the
- // two is different.
- if (qe->client < CLIENT_COUNT) { // Client packet
- clientPtrs[qe->client] = qe;
- signal Send.sendDone[qe->client](msg, SUCCESS);
- call CollectionDebug.logEventMsg(NET_C_FE_SENDDONE_FAIL_ACK_SEND,
- call CollectionPacket.getSequenceNumber(msg),
- call CollectionPacket.getOrigin(msg),
- call AMPacket.destination(msg));
- } else { // Forwarded packet
- if (call MessagePool.put(qe->msg) != SUCCESS)
- call CollectionDebug.logEvent(NET_C_FE_PUT_MSGPOOL_ERR);
- if (call QEntryPool.put(qe) != SUCCESS)
- call CollectionDebug.logEvent(NET_C_FE_PUT_QEPOOL_ERR);
- call CollectionDebug.logEventMsg(NET_C_FE_SENDDONE_FAIL_ACK_FWD,
- call CollectionPacket.getSequenceNumber(msg),
- call CollectionPacket.getOrigin(msg),
- call AMPacket.destination(msg));
- }
- call SendQueue.dequeue();
+ /* Hit max retransmit threshold: drop the packet. */
+ call SendQueue.dequeue();
clearState(SENDING);
startRetxmitTimer(SENDDONE_OK_WINDOW, SENDDONE_OK_OFFSET);
+
+ sendComplete(qe, msg, FALSE);
}
}
else {
clearState(SENDING);
startRetxmitTimer(SENDDONE_OK_WINDOW, SENDDONE_OK_OFFSET);
call LinkEstimator.txAck(call AMPacket.destination(msg));
-
- if (qe->client < CLIENT_COUNT) {
- call CollectionDebug.logEventMsg(NET_C_FE_SENT_MSG,
- call CollectionPacket.getSequenceNumber(msg),
- call CollectionPacket.getOrigin(msg),
- call AMPacket.destination(msg));
- signal Send.sendDone[qe->client](msg, SUCCESS);
- dbg("Forwarder", "%s: our packet for client %hhu, remove %p from queue\n",
- __FUNCTION__, client, qe);
- clientPtrs[qe->client] = qe;
- }
- else if (call MessagePool.size() < call MessagePool.maxSize()) {
- // A successfully forwarded packet.
- dbg("Forwarder,Route", "%s: successfully forwarded packet (client: %hhu), message pool is %hhu/%hhu.\n", __FUNCTION__, qe->client, call MessagePool.size(), call MessagePool.maxSize());
- call CollectionDebug.logEventMsg(NET_C_FE_FWD_MSG,
- call CollectionPacket.getSequenceNumber(msg),
- call CollectionPacket.getOrigin(msg),
- call AMPacket.destination(msg));
- call SentCache.insert(qe->msg);
- if (call MessagePool.put(qe->msg) != SUCCESS)
- call CollectionDebug.logEvent(NET_C_FE_PUT_MSGPOOL_ERR);
- if (call QEntryPool.put(qe) != SUCCESS)
- call CollectionDebug.logEvent(NET_C_FE_PUT_QEPOOL_ERR);
- }
- else {
- dbg("Forwarder", "%s: BUG: we have a pool entry, but the pool is full, client is %hhu.\n", __FUNCTION__, qe->client);
- }
+ sendComplete(qe, msg, TRUE);
}
}
*/
message_t* ONE forward(message_t* ONE m) {
if (call MessagePool.empty()) {
- dbg("Route", "%s cannot forward, message pool empty.\n", __FUNCTION__);
- // send a debug message to the uart
+ dbg("CtpForwarder", "%s cannot forward, message pool empty.\n", __FUNCTION__);
call CollectionDebug.logEvent(NET_C_FE_MSG_POOL_EMPTY);
}
else if (call QEntryPool.empty()) {
- dbg("Route", "%s cannot forward, queue entry pool empty.\n",
- __FUNCTION__);
- // send a debug message to the uart
+ dbg("CtpForwarder", "%s cannot forward, queue entry pool empty.\n", __FUNCTION__);
call CollectionDebug.logEvent(NET_C_FE_QENTRY_POOL_EMPTY);
}
else {
qe = call QEntryPool.get();
if (qe == NULL) {
- call CollectionDebug.logEvent(NET_C_FE_GET_MSGPOOL_ERR);
+ call CollectionDebug.logEvent(NET_C_FE_GET_QEPOOL_ERR);
return m;
}
newMsg = call MessagePool.get();
if (newMsg == NULL) {
- call CollectionDebug.logEvent(NET_C_FE_GET_QEPOOL_ERR);
+ call QEntryPool.put(qe);
+ call CollectionDebug.logEvent(NET_C_FE_GET_MSGPOOL_ERR);
return m;
}
qe->retries = MAX_RETRIES;
- if (call SendQueue.enqueue(qe) == SUCCESS) {
- dbg("Forwarder,Route", "%s forwarding packet %p with queue size %hhu\n", __FUNCTION__, m, call SendQueue.size());
+ if (call SendQueue.enqueue(qe) != SUCCESS) {
+ // There was a problem enqueuing: drop packet and free memory
+ if (call MessagePool.put(newMsg) != SUCCESS)
+ call CollectionDebug.logEvent(NET_C_FE_PUT_MSGPOOL_ERR);
+ if (call QEntryPool.put(qe) != SUCCESS)
+ call CollectionDebug.logEvent(NET_C_FE_PUT_QEPOOL_ERR);
+
+ call CollectionDebug.logEvent(NET_C_FE_SEND_QUEUE_FULL);
+ // Fall to base case of just returning received buffer
+ }
+ else {
+ // Put the packet on the queue to send
+ dbg("CtpForwarder", "%s forwarding packet %p with queue size %hhu\n", __FUNCTION__, m, call SendQueue.size());
// Loop-detection code:
if (call CtpInfo.getEtx(&gradient) == SUCCESS) {
// We only check for loops if we know our own metric
if (call CtpPacket.getEtx(m) <= gradient) {
// If our etx metric is less than or equal to the etx value
// on the packet (etx of the previous hop node), then we believe
- // we are in a loop.
+ // we may be in a loop.
// Trigger a route update and backoff.
call CtpInfo.triggerImmediateRouteUpdate();
startRetxmitTimer(LOOPY_WINDOW, LOOPY_OFFSET);
if (!call RetxmitTimer.isRunning()) {
// sendTask is only immediately posted if we don't detect a
// loop.
- dbg("FHangBug", "%s: posted sendTask.\n", __FUNCTION__);
post sendTask();
}
// Successful function exit point:
return newMsg;
- } else {
- // There was a problem enqueuing to the send queue.
- if (call MessagePool.put(newMsg) != SUCCESS)
- call CollectionDebug.logEvent(NET_C_FE_PUT_MSGPOOL_ERR);
- if (call QEntryPool.put(qe) != SUCCESS)
- call CollectionDebug.logEvent(NET_C_FE_PUT_QEPOOL_ERR);
}
}
- // NB: at this point, we have a resource acquistion problem.
- // Log the event, and drop the
- // packet on the floor.
-
- call CollectionDebug.logEvent(NET_C_FE_SEND_QUEUE_FULL);
+ // Only reach this code if we weren't able to enqueue:
+ // return packet to link layer, "dropping" it.
return m;
}
* send history cache (in case we recently forwarded this packet).
* The cache is important as nodes immediately forward packets
* but wait a period before retransmitting after an ack failure.
- * If this node is a root, signal receive.
+ * If this node is a root, signal receive. If not, call
+ * forward().
+ *
+ * There are two standard forwarding call paths, which depend on
+ * whether the forwarding engine is already sending packets. If not,
+ * the forwarder can send immediately. If it is, then it just puts
+ * the packet on the queue and waits for the forwarder to pull
+ * it off in time:
+ *
+ * !sending: receive -> forward -> sendTask -> sendDone
+ *  sending: receive -> forward ... RetxmitTimer -> sendTask -> sendDone
*/
event message_t*
SubReceive.receive(message_t* msg, void* payload, uint8_t len) {
collection_id_t collectid;
- bool duplicate = FALSE;
fe_queue_entry_t* qe;
uint8_t i, thl;
for (i = call SendQueue.size(); --i;) {
qe = call SendQueue.element(i);
if (call CtpPacket.matchInstance(qe->msg, msg)) {
- duplicate = TRUE;
- break;
+ call CollectionDebug.logEvent(NET_C_FE_DUPLICATE_QUEUE);
+ return msg;
}
}
}
-
- if (duplicate) {
- call CollectionDebug.logEvent(NET_C_FE_DUPLICATE_QUEUE);
- return msg;
- }
// If I'm the root, signal receive.
else if (call RootControl.isRoot())
call Packet.payloadLength(msg)))
return msg;
else {
- dbg("Route", "Forwarding packet from %hu.\n", getHeader(msg)->origin);
+ dbg("CtpForwarder", "Forwarding packet from %hu.\n", getHeader(msg)->origin);
return forward(msg);
}
}
(msg, payload + sizeof(ctp_data_header_t),
len - sizeof(ctp_data_header_t));
}
+
+
+ /*
+ * sendComplete is called by sendDone when it is done
+ * with a packet (either due to too many retransmissions or
+ * an acknowledgment). It frees memory appropriately and
+ * cleans up the sending state. For local packets, this
+ * means freeing the client's state and signaling sendDone;
+ * for forwarded packets it means returning the queue entry
+ * and packet to their respective pools.
+ *
+ *
+ */
+ void sendComplete(fe_queue_entry_t* qe, message_t* msg, bool success) {
+ // Four cases: local success, local failure, forwarded success, forwarded failure
+ if (qe->client < CLIENT_COUNT) {
+ if (success) { // Local success
+ dbg("CtpForwarder", "%s: packet %hu.%hhu for client %hhu acknowledged.\n", __FUNCTION__, call CollectionPacket.getOrigin(msg), call CollectionPacket.getSequenceNumber(msg), qe->client);
+ call CollectionDebug.logEventMsg(NET_C_FE_SENT_MSG,
+ call CollectionPacket.getSequenceNumber(msg),
+ call CollectionPacket.getOrigin(msg),
+ call AMPacket.destination(msg));
+ } else { // Local failure
+ dbg("CtpForwarder", "%s: packet %hu.%hhu for client %hhu dropped.\n", __FUNCTION__, call CollectionPacket.getOrigin(msg), call CollectionPacket.getSequenceNumber(msg), qe->client);
+ call CollectionDebug.logEventMsg(NET_C_FE_SENDDONE_FAIL_ACK_SEND,
+ call CollectionPacket.getSequenceNumber(msg),
+ call CollectionPacket.getOrigin(msg),
+ call AMPacket.destination(msg));
+ }
+ // Client memory cleanup
+ clientPtrs[qe->client] = qe;
+ signal Send.sendDone[qe->client](msg, SUCCESS);
+ }
+ else {
+ if (success) { // Forwarded success
+ call SentCache.insert(qe->msg);
+ dbg("CtpForwarder", "%s: forwarded packet %hu.%hhu acknowledged: insert in transmit queue.\n", __FUNCTION__, call CollectionPacket.getOrigin(msg), call CollectionPacket.getSequenceNumber(msg));
+ call CollectionDebug.logEventMsg(NET_C_FE_FWD_MSG,
+ call CollectionPacket.getSequenceNumber(msg),
+ call CollectionPacket.getOrigin(msg),
+ call AMPacket.destination(msg));
+ }
+ else { // Forwarded failure
+ dbg("CtpForwarder", "%s: forwarded packet %hu.%hhu dropped.\n", __FUNCTION__, call CollectionPacket.getOrigin(msg), call CollectionPacket.getSequenceNumber(msg));
+ call CollectionDebug.logEventMsg(NET_C_FE_SENDDONE_FAIL_ACK_FWD,
+ call CollectionPacket.getSequenceNumber(msg),
+ call CollectionPacket.getOrigin(msg),
+ call AMPacket.destination(msg));
+ }
+ // Forwarding memory cleanup
+ if (call MessagePool.put(qe->msg) != SUCCESS)
+ call CollectionDebug.logEvent(NET_C_FE_PUT_MSGPOOL_ERR);
+ if (call QEntryPool.put(qe) != SUCCESS)
+ call CollectionDebug.logEvent(NET_C_FE_PUT_QEPOOL_ERR);
+ }
+ }
+
+
event void RetxmitTimer.fired() {
clearState(SENDING);
- dbg("FHangBug", "%s posted sendTask.\n", __FUNCTION__);
post sendTask();
}