spinlock_t lock;
        unsigned long last_poll;
        int up;
-       int request_sent;
-       u32 txseq; /* TX sequence number */
+       u32 txseq; /* TX sequence number, 0 = none */
        u32 rxseq; /* RX sequence number */
 };
 
        struct cisco_packet *cisco_data;
        struct in_device *in_dev;
        __be32 addr, mask;
+       u32 ack;
 
        if (skb->len < sizeof(struct hdlc_header))
                goto rx_error;
                case CISCO_KEEPALIVE_REQ:
                        spin_lock(&st->lock);
                        st->rxseq = ntohl(cisco_data->par1);
-                       if (st->request_sent &&
-                           ntohl(cisco_data->par2) == st->txseq) {
+                       ack = ntohl(cisco_data->par2);
+                       /* ack == 0: the peer has not seen any of our REQs */
+                       if (ack && (ack == st->txseq ||
+                                   /* our current REQ may be in transit */
+                                   ack == st->txseq - 1)) {
                                st->last_poll = jiffies;
                                if (!st->up) {
                                        u32 sec, min, hrs, days;
 
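+       /* txseq is pre-incremented: the first REQ carries sequence 1 */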
        cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq),
                             htonl(st->rxseq));
-       st->request_sent = 1;
        spin_unlock(&st->lock);
 
        st->timer.expires = jiffies + st->settings.interval * HZ;
        unsigned long flags;
 
        spin_lock_irqsave(&st->lock, flags);
-       st->up = 0;
-       st->request_sent = 0;
-       st->txseq = st->rxseq = 0;
+       st->up = st->txseq = st->rxseq = 0;
        spin_unlock_irqrestore(&st->lock, flags);
 
        init_timer(&st->timer);
 
        spin_lock_irqsave(&st->lock, flags);
        netif_dormant_on(dev);
-       st->up = 0;
-       st->request_sent = 0;
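+       /* txseq == 0: no keepalive REQ outstanding */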
+       st->up = st->txseq = 0;
        spin_unlock_irqrestore(&st->lock, flags);
 }