process_mpa_request(ep);
break;
default:
- if (ep->com.so->so_rcv.sb_cc)
+ if (sbavail(&ep->com.so->so_rcv))
printf("%s Unexpected streaming data."
" ep %p state %d so %p so_state %x so_rcv.sb_cc %u so_rcv.sb_mb %p\n",
__FUNCTION__, ep, state_read(&ep->com), ep->com.so, ep->com.so->so_state,
- ep->com.so->so_rcv.sb_cc, ep->com.so->so_rcv.sb_mb);
+ sbavail(&ep->com.so->so_rcv), ep->com.so->so_rcv.sb_mb);
break;
}
return;
* Autosize the send buffer.
*/
if (snd->sb_flags & SB_AUTOSIZE && VNET(tcp_do_autosndbuf)) {
- if (snd->sb_cc >= (snd->sb_hiwat / 8 * 7) &&
- snd->sb_cc < VNET(tcp_autosndbuf_max)) {
+ if (sbused(snd) >= (snd->sb_hiwat / 8 * 7) &&
+ sbused(snd) < VNET(tcp_autosndbuf_max)) {
if (!sbreserve_locked(snd, min(snd->sb_hiwat +
VNET(tcp_autosndbuf_inc), VNET(tcp_autosndbuf_max)),
so, curthread))
INP_WLOCK_ASSERT(inp);
SOCKBUF_LOCK(so_rcv);
- KASSERT(toep->tp_enqueued >= so_rcv->sb_cc,
- ("%s: so_rcv->sb_cc > enqueued", __func__));
- toep->tp_rx_credits += toep->tp_enqueued - so_rcv->sb_cc;
- toep->tp_enqueued = so_rcv->sb_cc;
+ KASSERT(toep->tp_enqueued >= sbused(so_rcv),
+ ("%s: sbused(so_rcv) > enqueued", __func__));
+ toep->tp_rx_credits += toep->tp_enqueued - sbused(so_rcv);
+ toep->tp_enqueued = sbused(so_rcv);
SOCKBUF_UNLOCK(so_rcv);
must_send = toep->tp_rx_credits + 16384 >= tp->rcv_wnd;
so_sowwakeup_locked(so);
}
- if (snd->sb_sndptroff < snd->sb_cc)
+ if (snd->sb_sndptroff < sbused(snd))
t3_push_frames(so, 0);
out_free:
{
struct sockaddr_in *local, *remote;
- CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sb_cc %d", __func__,
- ep->com.so, ep, states[ep->com.state], ep->com.so->so_rcv.sb_cc);
+ CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sbused %d", __func__,
+ ep->com.so, ep, states[ep->com.state], sbused(&ep->com.so->so_rcv));
switch (state_read(&ep->com)) {
case MPA_REQ_SENT:
process_mpa_request(ep);
break;
default:
- if (ep->com.so->so_rcv.sb_cc)
- log(LOG_ERR, "%s: Unexpected streaming data. "
- "ep %p, state %d, so %p, so_state 0x%x, sb_cc %u\n",
+ if (sbused(&ep->com.so->so_rcv))
+ log(LOG_ERR, "%s: Unexpected streaming data. ep %p, "
+ "state %d, so %p, so_state 0x%x, sbused %u\n",
__func__, ep, state_read(&ep->com), ep->com.so,
- ep->com.so->so_state, ep->com.so->so_rcv.sb_cc);
+ ep->com.so->so_state, sbused(&ep->com.so->so_rcv));
break;
}
}
INP_WLOCK_ASSERT(inp);
SOCKBUF_LOCK(sb);
- KASSERT(toep->sb_cc >= sb->sb_cc,
+ KASSERT(toep->sb_cc >= sbused(sb),
("%s: sb %p has more data (%d) than last time (%d).",
- __func__, sb, sb->sb_cc, toep->sb_cc));
+ __func__, sb, sbused(sb), toep->sb_cc));
if (toep->ulp_mode == ULP_MODE_ISCSI) {
toep->rx_credits += toep->sb_cc;
toep->sb_cc = 0;
} else {
- toep->rx_credits += toep->sb_cc - sb->sb_cc;
- toep->sb_cc = sb->sb_cc;
+ toep->rx_credits += toep->sb_cc - sbused(sb);
+ toep->sb_cc = sbused(sb);
}
credits = toep->rx_credits;
SOCKBUF_UNLOCK(sb);
tp->rcv_nxt = be32toh(cpl->rcv_nxt);
toep->ddp_flags &= ~(DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE);
- KASSERT(toep->sb_cc >= sb->sb_cc,
+ KASSERT(toep->sb_cc >= sbused(sb),
("%s: sb %p has more data (%d) than last time (%d).",
- __func__, sb, sb->sb_cc, toep->sb_cc));
- toep->rx_credits += toep->sb_cc - sb->sb_cc;
+ __func__, sb, sbused(sb), toep->sb_cc));
+ toep->rx_credits += toep->sb_cc - sbused(sb);
#ifdef USE_DDP_RX_FLOW_CONTROL
toep->rx_credits -= m->m_len; /* adjust for F_RX_FC_DDP */
#endif
sbappendstream_locked(sb, m);
- toep->sb_cc = sb->sb_cc;
+ toep->sb_cc = sbused(sb);
}
socantrcvmore_locked(so); /* unlocks the sockbuf */
}
}
- KASSERT(toep->sb_cc >= sb->sb_cc,
+ KASSERT(toep->sb_cc >= sbused(sb),
("%s: sb %p has more data (%d) than last time (%d).",
- __func__, sb, sb->sb_cc, toep->sb_cc));
- toep->rx_credits += toep->sb_cc - sb->sb_cc;
+ __func__, sb, sbused(sb), toep->sb_cc));
+ toep->rx_credits += toep->sb_cc - sbused(sb);
sbappendstream_locked(sb, m);
- toep->sb_cc = sb->sb_cc;
+ toep->sb_cc = sbused(sb);
sorwakeup_locked(so);
SOCKBUF_UNLOCK_ASSERT(sb);
tp->rcv_wnd -= n;
#endif
- KASSERT(toep->sb_cc >= sb->sb_cc,
+ KASSERT(toep->sb_cc >= sbused(sb),
("%s: sb %p has more data (%d) than last time (%d).",
- __func__, sb, sb->sb_cc, toep->sb_cc));
- toep->rx_credits += toep->sb_cc - sb->sb_cc;
+ __func__, sb, sbused(sb), toep->sb_cc));
+ toep->rx_credits += toep->sb_cc - sbused(sb);
#ifdef USE_DDP_RX_FLOW_CONTROL
toep->rx_credits -= n; /* adjust for F_RX_FC_DDP */
#endif
sbappendstream_locked(sb, m);
- toep->sb_cc = sb->sb_cc;
+ toep->sb_cc = sbused(sb);
}
/* SET_TCB_FIELD sent as a ULP command looks like this */
else
discourage_ddp(toep);
- KASSERT(toep->sb_cc >= sb->sb_cc,
+ KASSERT(toep->sb_cc >= sbused(sb),
("%s: sb %p has more data (%d) than last time (%d).",
- __func__, sb, sb->sb_cc, toep->sb_cc));
- toep->rx_credits += toep->sb_cc - sb->sb_cc;
+ __func__, sb, sbused(sb), toep->sb_cc));
+ toep->rx_credits += toep->sb_cc - sbused(sb);
#ifdef USE_DDP_RX_FLOW_CONTROL
toep->rx_credits -= len; /* adjust for F_RX_FC_DDP */
#endif
sbappendstream_locked(sb, m);
- toep->sb_cc = sb->sb_cc;
+ toep->sb_cc = sbused(sb);
wakeup:
KASSERT(toep->ddp_flags & db_flag,
("%s: DDP buffer not active. toep %p, ddp_flags 0x%x, report 0x%x",
#endif
/* XXX: too eager to disable DDP, could handle NBIO better than this. */
- if (sb->sb_cc >= uio->uio_resid || uio->uio_resid < sc->tt.ddp_thres ||
+ if (sbused(sb) >= uio->uio_resid || uio->uio_resid < sc->tt.ddp_thres ||
uio->uio_resid > MAX_DDP_BUFFER_SIZE || uio->uio_iovcnt > 1 ||
so->so_state & SS_NBIO || flags & (MSG_DONTWAIT | MSG_NBIO) ||
error || so->so_error || sb->sb_state & SBS_CANTRCVMORE)
* payload.
*/
ddp_flags = select_ddp_flags(so, flags, db_idx);
- wr = mk_update_tcb_for_ddp(sc, toep, db_idx, sb->sb_cc, ddp_flags);
+ wr = mk_update_tcb_for_ddp(sc, toep, db_idx, sbused(sb), ddp_flags);
if (wr == NULL) {
/*
* Just unhold the pages. The DDP buffer's software state is
/* uio should be just as it was at entry */
KASSERT(oresid == uio->uio_resid,
- ("%s: oresid = %d, uio_resid = %zd, sb_cc = %d",
- __func__, oresid, uio->uio_resid, sb->sb_cc));
+ ("%s: oresid = %d, uio_resid = %zd, sbused = %d",
+ __func__, oresid, uio->uio_resid, sbused(sb)));
error = handle_ddp(so, uio, flags, 0);
ddp_handled = 1;
/* Abort if socket has reported problems. */
if (so->so_error) {
- if (sb->sb_cc > 0)
+ if (sbused(sb))
goto deliver;
if (oresid > uio->uio_resid)
goto out;
/* Door is closed. Deliver what is left, if any. */
if (sb->sb_state & SBS_CANTRCVMORE) {
- if (sb->sb_cc > 0)
+ if (sbused(sb))
goto deliver;
else
goto out;
}
/* Socket buffer is empty and we shall not block. */
- if (sb->sb_cc == 0 &&
+ if (sbused(sb) == 0 &&
((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {
error = EAGAIN;
goto out;
}
/* Socket buffer got some data that we shall deliver now. */
- if (sb->sb_cc > 0 && !(flags & MSG_WAITALL) &&
+ if (sbused(sb) && !(flags & MSG_WAITALL) &&
((sb->sb_flags & SS_NBIO) ||
(flags & (MSG_DONTWAIT|MSG_NBIO)) ||
- sb->sb_cc >= sb->sb_lowat ||
- sb->sb_cc >= uio->uio_resid ||
- sb->sb_cc >= sb->sb_hiwat) ) {
+ sbused(sb) >= sb->sb_lowat ||
+ sbused(sb) >= uio->uio_resid ||
+ sbused(sb) >= sb->sb_hiwat) ) {
goto deliver;
}
/* On MSG_WAITALL we must wait until all data or error arrives. */
if ((flags & MSG_WAITALL) &&
- (sb->sb_cc >= uio->uio_resid || sb->sb_cc >= sb->sb_lowat))
+ (sbused(sb) >= uio->uio_resid || sbused(sb) >= sb->sb_lowat))
goto deliver;
/*
deliver:
SOCKBUF_LOCK_ASSERT(&so->so_rcv);
- KASSERT(sb->sb_cc > 0, ("%s: sockbuf empty", __func__));
+ KASSERT(sbused(sb) > 0, ("%s: sockbuf empty", __func__));
KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));
if (sb->sb_flags & SB_DDP_INDICATE && !ddp_handled)
uio->uio_td->td_ru.ru_msgrcv++;
/* Fill uio until full or current end of socket buffer is reached. */
- len = min(uio->uio_resid, sb->sb_cc);
+ len = min(uio->uio_resid, sbused(sb));
if (mp0 != NULL) {
/* Dequeue as many mbufs as possible. */
if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
* is enough data received to read the PDU.
*/
SOCKBUF_LOCK(&so->so_rcv);
- available = so->so_rcv.sb_cc;
+ available = sbavail(&so->so_rcv);
if (available < ic->ic_receive_len) {
so->so_rcv.sb_lowat = ic->ic_receive_len;
cv_wait(&ic->ic_receive_cv, &so->so_rcv.sb_mtx);
case FIONREAD:
/* Unlocked read. */
- *(int *)data = so->so_rcv.sb_cc;
+ *(int *)data = sbavail(&so->so_rcv);
break;
case FIONWRITE:
/* Unlocked read. */
- *(int *)data = so->so_snd.sb_cc;
+ *(int *)data = sbavail(&so->so_snd);
break;
case FIONSPACE:
- if ((so->so_snd.sb_hiwat < so->so_snd.sb_cc) ||
+ /* Unlocked read. */
+ if ((so->so_snd.sb_hiwat < sbused(&so->so_snd)) ||
(so->so_snd.sb_mbmax < so->so_snd.sb_mbcnt))
*(int *)data = 0;
else
struct thread *td)
{
struct socket *so = fp->f_data;
+ struct sockbuf *sb;
#ifdef MAC
int error;
#endif
* If SBS_CANTRCVMORE is set, but there's still data left in the
* receive buffer, the socket is still readable.
*/
- SOCKBUF_LOCK(&so->so_rcv);
- if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0 ||
- so->so_rcv.sb_cc != 0)
+ sb = &so->so_rcv;
+ SOCKBUF_LOCK(sb);
+ if ((sb->sb_state & SBS_CANTRCVMORE) == 0 || sbavail(sb))
ub->st_mode |= S_IRUSR | S_IRGRP | S_IROTH;
- ub->st_size = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
- SOCKBUF_UNLOCK(&so->so_rcv);
- /* Unlocked read. */
- if ((so->so_snd.sb_state & SBS_CANTSENDMORE) == 0)
+ ub->st_size = sbavail(sb) - sb->sb_ctl;
+ SOCKBUF_UNLOCK(sb);
+
+ sb = &so->so_snd;
+ SOCKBUF_LOCK(sb);
+ if ((sb->sb_state & SBS_CANTSENDMORE) == 0)
ub->st_mode |= S_IWUSR | S_IWGRP | S_IWOTH;
+ SOCKBUF_UNLOCK(sb);
ub->st_uid = so->so_cred->cr_uid;
ub->st_gid = so->so_cred->cr_gid;
return (*so->so_proto->pr_usrreqs->pru_sense)(so, ub);
* 2. MSG_DONTWAIT is not set
*/
if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
- so->so_rcv.sb_cc < uio->uio_resid) &&
- so->so_rcv.sb_cc < so->so_rcv.sb_lowat &&
+ sbavail(&so->so_rcv) < uio->uio_resid) &&
+ sbavail(&so->so_rcv) < so->so_rcv.sb_lowat &&
m->m_nextpkt == NULL && (pr->pr_flags & PR_ATOMIC) == 0)) {
- KASSERT(m != NULL || !so->so_rcv.sb_cc,
- ("receive: m == %p so->so_rcv.sb_cc == %u",
- m, so->so_rcv.sb_cc));
+ KASSERT(m != NULL || !sbavail(&so->so_rcv),
+ ("receive: m == %p sbavail == %u",
+ m, sbavail(&so->so_rcv)));
if (so->so_error) {
if (m != NULL)
goto dontblock;
/* Abort if socket has reported problems. */
if (so->so_error) {
- if (sb->sb_cc > 0)
+ if (sbavail(sb) > 0)
goto deliver;
if (oresid > uio->uio_resid)
goto out;
/* Door is closed. Deliver what is left, if any. */
if (sb->sb_state & SBS_CANTRCVMORE) {
- if (sb->sb_cc > 0)
+ if (sbavail(sb) > 0)
goto deliver;
else
goto out;
}
/* Socket buffer is empty and we shall not block. */
- if (sb->sb_cc == 0 &&
+ if (sbavail(sb) == 0 &&
((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {
error = EAGAIN;
goto out;
}
/* Socket buffer got some data that we shall deliver now. */
- if (sb->sb_cc > 0 && !(flags & MSG_WAITALL) &&
+ if (sbavail(sb) > 0 && !(flags & MSG_WAITALL) &&
((sb->sb_flags & SS_NBIO) ||
(flags & (MSG_DONTWAIT|MSG_NBIO)) ||
- sb->sb_cc >= sb->sb_lowat ||
- sb->sb_cc >= uio->uio_resid ||
- sb->sb_cc >= sb->sb_hiwat) ) {
+ sbavail(sb) >= sb->sb_lowat ||
+ sbavail(sb) >= uio->uio_resid ||
+ sbavail(sb) >= sb->sb_hiwat) ) {
goto deliver;
}
/* On MSG_WAITALL we must wait until all data or error arrives. */
if ((flags & MSG_WAITALL) &&
- (sb->sb_cc >= uio->uio_resid || sb->sb_cc >= sb->sb_hiwat))
+ (sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_hiwat))
goto deliver;
/*
deliver:
SOCKBUF_LOCK_ASSERT(&so->so_rcv);
- KASSERT(sb->sb_cc > 0, ("%s: sockbuf empty", __func__));
+ KASSERT(sbavail(sb) > 0, ("%s: sockbuf empty", __func__));
KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));
/* Statistics. */
uio->uio_td->td_ru.ru_msgrcv++;
/* Fill uio until full or current end of socket buffer is reached. */
- len = min(uio->uio_resid, sb->sb_cc);
+ len = min(uio->uio_resid, sbavail(sb));
if (mp0 != NULL) {
/* Dequeue as many mbufs as possible. */
if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
*/
SOCKBUF_LOCK(&so->so_rcv);
while ((m = so->so_rcv.sb_mb) == NULL) {
- KASSERT(so->so_rcv.sb_cc == 0,
- ("soreceive_dgram: sb_mb NULL but sb_cc %u",
- so->so_rcv.sb_cc));
+ KASSERT(sbavail(&so->so_rcv) == 0,
+ ("soreceive_dgram: sb_mb NULL but sbavail %u",
+ sbavail(&so->so_rcv)));
if (so->so_error) {
error = so->so_error;
so->so_error = 0;
so = kn->kn_fp->f_data;
SOCKBUF_LOCK_ASSERT(&so->so_rcv);
- kn->kn_data = so->so_rcv.sb_cc - so->so_rcv.sb_ctl;
+ kn->kn_data = sbavail(&so->so_rcv) - so->so_rcv.sb_ctl;
if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
kn->kn_flags |= EV_EOF;
kn->kn_fflags = so->so_error;
if (kn->kn_data >= kn->kn_sdata)
return 1;
} else {
- if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat)
+ if (sbavail(&so->so_rcv) >= so->so_rcv.sb_lowat)
return 1;
}
sorwakeup_locked(so);
SOCKBUF_LOCK(&so->so_snd);
so->so_snd.sb_state |= SBS_CANTSENDMORE;
- sbdrop_locked(&so->so_snd, so->so_snd.sb_cc);
+ sbdrop_locked(&so->so_snd, sbused(&so->so_snd));
sowwakeup_locked(so);
wakeup(&so->so_timeo);
}
/*
* Check if we have more data to send
*/
-
sbdroprecord(&pcb->so->so_snd);
- if (pcb->so->so_snd.sb_cc > 0) {
+ if (sbavail(&pcb->so->so_snd) > 0) {
if (ng_btsocket_l2cap_send2(pcb) == 0)
ng_btsocket_l2cap_timeout(pcb);
else
mtx_assert(&pcb->pcb_mtx, MA_OWNED);
- if (pcb->so->so_snd.sb_cc == 0)
+ if (sbavail(&pcb->so->so_snd) == 0)
return (EINVAL); /* XXX */
m = m_dup(pcb->so->so_snd.sb_mb, M_NOWAIT);
}
for (error = 0, sent = 0; sent < limit; sent ++) {
- length = min(pcb->mtu, pcb->so->so_snd.sb_cc);
+ length = min(pcb->mtu, sbavail(&pcb->so->so_snd));
if (length == 0)
break;
sbdroprecord(&pcb->so->so_snd);
/* Send more if we have any */
- if (pcb->so->so_snd.sb_cc > 0)
+ if (sbavail(&pcb->so->so_snd) > 0)
if (ng_btsocket_sco_send2(pcb) == 0)
ng_btsocket_sco_timeout(pcb);
mtx_assert(&pcb->pcb_mtx, MA_OWNED);
while (pcb->rt->pending < pcb->rt->num_pkts &&
- pcb->so->so_snd.sb_cc > 0) {
+ sbavail(&pcb->so->so_snd) > 0) {
/* Get a copy of the first packet on send queue */
m = m_dup(pcb->so->so_snd.sb_mb, M_NOWAIT);
if (m == NULL) {
struct sockbuf *sb = &so->so_rcv;
/* If the socket is full, we're ready. */
- if (sb->sb_cc >= sb->sb_hiwat || sb->sb_mbcnt >= sb->sb_mbmax)
+ if (sbused(sb) >= sb->sb_hiwat || sb->sb_mbcnt >= sb->sb_mbmax)
goto ready;
/* Check to see if we have a request. */
unsigned long packlen;
struct packet q, *p = &q;
- if (sb->sb_cc < 2)
+ if (sbavail(sb) < 2)
return DNS_WAIT;
q.m = sb->sb_mb;
q.n = q.m->m_nextpkt;
q.moff = 0;
q.offset = 0;
- q.len = sb->sb_cc;
+ q.len = sbavail(sb);
GET16(p, packlen);
if (packlen + 2 > q.len)
"mbcnt(%ld) >= mbmax(%ld): %d",
sb->sb_cc, sb->sb_hiwat, sb->sb_cc >= sb->sb_hiwat,
sb->sb_mbcnt, sb->sb_mbmax, sb->sb_mbcnt >= sb->sb_mbmax);
- return (sb->sb_cc >= sb->sb_hiwat || sb->sb_mbcnt >= sb->sb_mbmax);
+ return (sbused(sb) >= sb->sb_hiwat || sb->sb_mbcnt >= sb->sb_mbmax);
}
/*
sohashttpget(struct socket *so, void *arg, int waitflag)
{
- if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0 && !sbfull(&so->so_rcv)) {
+ if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) == 0 &&
+ !sbfull(&so->so_rcv)) {
struct mbuf *m;
char *cmp;
int cmplen, cc;
m = so->so_rcv.sb_mb;
- cc = so->so_rcv.sb_cc - 1;
+ cc = sbavail(&so->so_rcv) - 1;
if (cc < 1)
return (SU_OK);
switch (*mtod(m, char *)) {
goto fallout;
m = so->so_rcv.sb_mb;
- cc = so->so_rcv.sb_cc;
+ cc = sbavail(&so->so_rcv);
inspaces = spaces = 0;
for (m = so->so_rcv.sb_mb; m; m = n) {
n = m->m_nextpkt;
* have NCHRS left
*/
copied = 0;
- ccleft = so->so_rcv.sb_cc;
+ ccleft = sbavail(&so->so_rcv);
if (ccleft < NCHRS)
goto readmore;
a = b = c = '\0';
pn->flags = tp->t_flags;
pn->rxt_length = tp->t_rxtcur;
pn->snd_buf_hiwater = inp->inp_socket->so_snd.sb_hiwat;
- pn->snd_buf_cc = inp->inp_socket->so_snd.sb_cc;
+ pn->snd_buf_cc = sbused(&inp->inp_socket->so_snd);
pn->rcv_buf_hiwater = inp->inp_socket->so_rcv.sb_hiwat;
- pn->rcv_buf_cc = inp->inp_socket->so_rcv.sb_cc;
+ pn->rcv_buf_cc = sbused(&inp->inp_socket->so_rcv);
pn->sent_inflight_bytes = tp->snd_max - tp->snd_una;
pn->t_segqlen = tp->t_segqlen;
tcp_timer_activate(tp, TT_REXMT,
tp->t_rxtcur);
sowwakeup(so);
- if (so->so_snd.sb_cc)
+ if (sbavail(&so->so_snd))
(void) tcp_output(tp);
goto check_delack;
}
* Otherwise we would send pure ACKs.
*/
SOCKBUF_LOCK(&so->so_snd);
- avail = so->so_snd.sb_cc -
+ avail = sbavail(&so->so_snd) -
(tp->snd_nxt - tp->snd_una);
SOCKBUF_UNLOCK(&so->so_snd);
if (avail > 0)
cc_ack_received(tp, th, CC_ACK);
SOCKBUF_LOCK(&so->so_snd);
- if (acked > so->so_snd.sb_cc) {
- tp->snd_wnd -= so->so_snd.sb_cc;
+ if (acked > sbavail(&so->so_snd)) {
+ tp->snd_wnd -= sbavail(&so->so_snd);
mfree = sbcut_locked(&so->so_snd,
- (int)so->so_snd.sb_cc);
+ (int)sbavail(&so->so_snd));
ourfinisacked = 1;
} else {
mfree = sbcut_locked(&so->so_snd, acked);
* actually wanting to send this much urgent data.
*/
SOCKBUF_LOCK(&so->so_rcv);
- if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
+ if (th->th_urp + sbavail(&so->so_rcv) > sb_max) {
th->th_urp = 0; /* XXX */
thflags &= ~TH_URG; /* XXX */
SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */
*/
if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
tp->rcv_up = th->th_seq + th->th_urp;
- so->so_oobmark = so->so_rcv.sb_cc +
+ so->so_oobmark = sbavail(&so->so_rcv) +
(tp->rcv_up - tp->rcv_nxt) - 1;
if (so->so_oobmark == 0)
so->so_rcv.sb_state |= SBS_RCVATMARK;
* to send then the probe will be the FIN
* itself.
*/
- if (off < so->so_snd.sb_cc)
+ if (off < sbused(&so->so_snd))
flags &= ~TH_FIN;
sendwin = 1;
} else {
*/
if (sack_rxmit == 0) {
if (sack_bytes_rxmt == 0)
- len = ((long)ulmin(so->so_snd.sb_cc, sendwin) - off);
+ len = ((long)ulmin(sbavail(&so->so_snd), sendwin) -
+ off);
else {
long cwin;
* sending new data, having retransmitted all the
* data possible in the scoreboard.
*/
- len = ((long)ulmin(so->so_snd.sb_cc, tp->snd_wnd)
- - off);
+ len = ((long)ulmin(sbavail(&so->so_snd), tp->snd_wnd) -
+ off);
/*
* Don't remove this (len > 0) check !
* We explicitly check for len > 0 here (although it
* TODO: Shrink send buffer during idle periods together
* with congestion window. Requires another timer. Has to
* wait for upcoming tcp timer rewrite.
+ *
+ * XXXGL: should sbused() or sbavail() be used here?
*/
if (V_tcp_do_autosndbuf && so->so_snd.sb_flags & SB_AUTOSIZE) {
if ((tp->snd_wnd / 4 * 5) >= so->so_snd.sb_hiwat &&
- so->so_snd.sb_cc >= (so->so_snd.sb_hiwat / 8 * 7) &&
- so->so_snd.sb_cc < V_tcp_autosndbuf_max &&
- sendwin >= (so->so_snd.sb_cc - (tp->snd_nxt - tp->snd_una))) {
+ sbused(&so->so_snd) >= (so->so_snd.sb_hiwat / 8 * 7) &&
+ sbused(&so->so_snd) < V_tcp_autosndbuf_max &&
+ sendwin >= (sbused(&so->so_snd) -
+ (tp->snd_nxt - tp->snd_una))) {
if (!sbreserve_locked(&so->so_snd,
min(so->so_snd.sb_hiwat + V_tcp_autosndbuf_inc,
V_tcp_autosndbuf_max), so, curthread))
tso = 1;
if (sack_rxmit) {
- if (SEQ_LT(p->rxmit + len, tp->snd_una + so->so_snd.sb_cc))
+ if (SEQ_LT(p->rxmit + len, tp->snd_una + sbused(&so->so_snd)))
flags &= ~TH_FIN;
} else {
- if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.sb_cc))
+ if (SEQ_LT(tp->snd_nxt + len, tp->snd_una +
+ sbused(&so->so_snd)))
flags &= ~TH_FIN;
}
*/
if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */
(idle || (tp->t_flags & TF_NODELAY)) &&
- len + off >= so->so_snd.sb_cc &&
+ len + off >= sbavail(&so->so_snd) &&
(tp->t_flags & TF_NOPUSH) == 0) {
goto send;
}
* if window is nonzero, transmit what we can,
* otherwise force out a byte.
*/
- if (so->so_snd.sb_cc && !tcp_timer_active(tp, TT_REXMT) &&
+ if (sbavail(&so->so_snd) && !tcp_timer_active(tp, TT_REXMT) &&
!tcp_timer_active(tp, TT_PERSIST)) {
tp->t_rxtshift = 0;
tcp_setpersist(tp);
* emptied:
*/
max_len = (tp->t_maxopd - optlen);
- if ((off + len) < so->so_snd.sb_cc) {
+ if ((off + len) < sbavail(&so->so_snd)) {
moff = len % max_len;
if (moff != 0) {
len -= moff;
* give data to the user when a buffer fills or
* a PUSH comes in.)
*/
- if (off + len == so->so_snd.sb_cc)
+ if (off + len == sbused(&so->so_snd))
flags |= TH_PUSH;
SOCKBUF_UNLOCK(&so->so_snd);
} else {
("sdp_start_disconnect: sdp_drop() returned NULL"));
} else {
soisdisconnecting(so);
- unread = so->so_rcv.sb_cc;
+ unread = sbused(&so->so_rcv);
sbflush(&so->so_rcv);
sdp_usrclosed(ssk);
if (!(ssk->flags & SDP_DROPPED)) {
/* We will never ever get anything unless we are connected. */
if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
/* When disconnecting there may be still some data left. */
- if (sb->sb_cc > 0)
+ if (sbavail(sb))
goto deliver;
if (!(so->so_state & SS_ISDISCONNECTED))
error = ENOTCONN;
}
/* Socket buffer is empty and we shall not block. */
- if (sb->sb_cc == 0 &&
+ if (sbavail(sb) == 0 &&
((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {
error = EAGAIN;
goto out;
/* Abort if socket has reported problems. */
if (so->so_error) {
- if (sb->sb_cc > 0)
+ if (sbavail(sb))
goto deliver;
if (oresid > uio->uio_resid)
goto out;
/* Door is closed. Deliver what is left, if any. */
if (sb->sb_state & SBS_CANTRCVMORE) {
- if (sb->sb_cc > 0)
+ if (sbavail(sb))
goto deliver;
else
goto out;
}
/* Socket buffer got some data that we shall deliver now. */
- if (sb->sb_cc > 0 && !(flags & MSG_WAITALL) &&
+ if (sbavail(sb) && !(flags & MSG_WAITALL) &&
((so->so_state & SS_NBIO) ||
(flags & (MSG_DONTWAIT|MSG_NBIO)) ||
- sb->sb_cc >= sb->sb_lowat ||
- sb->sb_cc >= uio->uio_resid ||
- sb->sb_cc >= sb->sb_hiwat) ) {
+ sbavail(sb) >= sb->sb_lowat ||
+ sbavail(sb) >= uio->uio_resid ||
+ sbavail(sb) >= sb->sb_hiwat) ) {
goto deliver;
}
/* On MSG_WAITALL we must wait until all data or error arrives. */
if ((flags & MSG_WAITALL) &&
- (sb->sb_cc >= uio->uio_resid || sb->sb_cc >= sb->sb_lowat))
+ (sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_lowat))
goto deliver;
/*
deliver:
SOCKBUF_LOCK_ASSERT(&so->so_rcv);
- KASSERT(sb->sb_cc > 0, ("%s: sockbuf empty", __func__));
+ KASSERT(sbavail(sb), ("%s: sockbuf empty", __func__));
KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));
/* Statistics. */
uio->uio_td->td_ru.ru_msgrcv++;
/* Fill uio until full or current end of socket buffer is reached. */
- len = min(uio->uio_resid, sb->sb_cc);
+ len = min(uio->uio_resid, sbavail(sb));
if (mp0 != NULL) {
/* Dequeue as many mbufs as possible. */
if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
if (so == NULL)
return;
- so->so_oobmark = so->so_rcv.sb_cc + mb->m_pkthdr.len - 1;
+ so->so_oobmark = sbused(&so->so_rcv) + mb->m_pkthdr.len - 1;
sohasoutofband(so);
ssk->oobflags &= ~(SDP_HAVEOOB | SDP_HADOOB);
if (!(so->so_options & SO_OOBINLINE)) {
* Compute bytes in the receive queue and socket buffer.
*/
bytes_in_process = (posted - SDP_MIN_TX_CREDITS) * buffer_size;
- bytes_in_process += ssk->socket->so_rcv.sb_cc;
+ bytes_in_process += sbused(&ssk->socket->so_rcv);
return bytes_in_process < max_bytes;
}
* error condition
*/
do_read = FALSE;
- if (so->so_rcv.sb_cc >= sizeof(uint32_t)
+ if (sbavail(&so->so_rcv) >= sizeof(uint32_t)
|| (so->so_rcv.sb_state & SBS_CANTRCVMORE)
|| so->so_error)
do_read = TRUE;
* buffered.
*/
do_read = FALSE;
- if (so->so_rcv.sb_cc >= ct->ct_record_resid
+ if (sbavail(&so->so_rcv) >= ct->ct_record_resid
|| (so->so_rcv.sb_state & SBS_CANTRCVMORE)
|| so->so_error)
do_read = TRUE;
{
*ack = atomic_load_acq_32(&xprt->xp_snt_cnt);
- *ack -= xprt->xp_socket->so_snd.sb_cc;
+ *ack -= sbused(&xprt->xp_socket->so_snd);
return (TRUE);
}
int sblock(struct sockbuf *sb, int flags);
void sbunlock(struct sockbuf *sb);
+/*
+ * Return how much data is available to be taken out of the socket
+ * buffer right now.
+ */
+static inline u_int
+sbavail(struct sockbuf *sb)
+{
+
+#if 0
+ SOCKBUF_LOCK_ASSERT(sb);
+#endif
+ return (sb->sb_cc);
+}
+
+/*
+ * Return how much data sits in the socket buffer.  Some of it
+ * might not yet be ready to be read.
+ */
+static inline u_int
+sbused(struct sockbuf *sb)
+{
+
+#if 0
+ SOCKBUF_LOCK_ASSERT(sb);
+#endif
+ return (sb->sb_cc);
+}
+
/*
* How much space is there in a socket buffer (so->so_snd or so->so_rcv)?
* This is problematical if the fields are unsigned, as the space might
/* can we read something from so? */
#define soreadabledata(so) \
- ((so)->so_rcv.sb_cc >= (so)->so_rcv.sb_lowat || \
+ (sbavail(&(so)->so_rcv) >= (so)->so_rcv.sb_lowat || \
!TAILQ_EMPTY(&(so)->so_comp) || (so)->so_error)
#define soreadable(so) \
(soreadabledata(so) || ((so)->so_rcv.sb_state & SBS_CANTRCVMORE))