        CA_EVENT_CWND_RESTART,  /* congestion window restart */
        CA_EVENT_COMPLETE_CWR,  /* end of congestion recovery */
        CA_EVENT_LOSS,          /* loss timeout */
+       CA_EVENT_ECN_NO_CE,     /* ECT set, but not CE marked */
+       CA_EVENT_ECN_IS_CE,     /* received CE marked IP packet */
+       CA_EVENT_DELAYED_ACK,   /* Delayed ack is sent */
+       CA_EVENT_NON_DELAYED_ACK, /* Non-delayed ack is sent */
 };
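
A congestion control module opts into these events through the existing cwnd_event hook. Below is a minimal sketch of a handler, modelled on what a DCTCP-style module would need; the example_* helpers are hypothetical stand-ins, not part of this patch:

#include <net/tcp.h>

/* Hypothetical helpers: a real module (e.g. DCTCP) would drive its
 * CE-state machine and delayed-ACK bookkeeping from these events.
 */
static void example_ce_event(struct sock *sk, bool ce) { }
static void example_ack_event(struct sock *sk, enum tcp_ca_event ev) { }

static void example_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
{
	switch (ev) {
	case CA_EVENT_ECN_IS_CE:	/* inbound packet carried a CE mark */
		example_ce_event(sk, true);
		break;
	case CA_EVENT_ECN_NO_CE:	/* ECT packet arrived without CE */
		example_ce_event(sk, false);
		break;
	case CA_EVENT_DELAYED_ACK:
	case CA_EVENT_NON_DELAYED_ACK:
		example_ack_event(sk, ev);
		break;
	default:
		break;			/* remaining events not of interest */
	}
}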
 
+/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
 enum tcp_ca_ack_event_flags {
-       CA_ACK_SLOWPATH = (1 << 0),
+       CA_ACK_SLOWPATH         = (1 << 0),     /* In slow path processing */
+       CA_ACK_WIN_UPDATE       = (1 << 1),     /* ACK updated window */
+       CA_ACK_ECE              = (1 << 2),     /* ECE bit is set on ack */
 };
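
These flags arrive in the module's in_ack_event(sk, flags) hook. A sketch of how a handler might key off them; purely illustrative:

static void example_in_ack_event(struct sock *sk, u32 flags)
{
	if (!(flags & CA_ACK_SLOWPATH)) {
		/* Fast path: header prediction hit; the caller passes
		 * CA_ACK_WIN_UPDATE alone in this case (see tcp_ack()).
		 */
		return;
	}

	if (flags & CA_ACK_ECE) {
		/* Peer echoed a CE mark; e.g. fold it into an ECN
		 * fraction estimate (illustrative only).
		 */
	}
}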
 
 /*
 
                        tcp_enter_quickack_mode((struct sock *)tp);
                break;
        case INET_ECN_CE:
+               if (tcp_ca_needs_ecn((struct sock *)tp))
+                       tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_IS_CE);
+
                if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
                        /* Better not delay acks, sender can have a very low cwnd */
                        tcp_enter_quickack_mode((struct sock *)tp);
                        tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
                }
-               /* fallinto */
+               tp->ecn_flags |= TCP_ECN_SEEN;
+               break;
        default:
+               if (tcp_ca_needs_ecn((struct sock *)tp))
+                       tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_NO_CE);
                tp->ecn_flags |= TCP_ECN_SEEN;
+               break;
        }
 }
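
For context, tcp_ca_needs_ecn() gates the new events on the attached congestion module advertising that it wants raw ECN signals; a sketch, assuming it matches the TCP_CONG_NEEDS_ECN-based helper in include/net/tcp.h:

static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
	return inet_csk(sk)->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}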
 
                tp->snd_una = ack;
                flag |= FLAG_WIN_UPDATE;
 
-               tcp_in_ack_event(sk, 0);
+               tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE);
 
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS);
        } else {
+               u32 ack_ev_flags = CA_ACK_SLOWPATH;
+
                if (ack_seq != TCP_SKB_CB(skb)->end_seq)
                        flag |= FLAG_DATA;
                else
                        flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
                                                        &sack_rtt_us);
 
-               if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
+               if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb))) {
                        flag |= FLAG_ECE;
+                       ack_ev_flags |= CA_ACK_ECE;
+               }
+
+               if (flag & FLAG_WIN_UPDATE)
+                       ack_ev_flags |= CA_ACK_WIN_UPDATE;
 
-               tcp_in_ack_event(sk, CA_ACK_SLOWPATH);
+               tcp_in_ack_event(sk, ack_ev_flags);
        }
 
        /* We passed data and got it acked, remove any soft error
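
tcp_in_ack_event(), called in both branches above, simply forwards the flags to the module's optional hook; a sketch, assuming it matches the static helper introduced alongside these flags in tcp_input.c:

static inline void tcp_in_ack_event(struct sock *sk, u32 flags)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->in_ack_event)
		icsk->icsk_ca_ops->in_ack_event(sk, flags);
}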
 
        int ato = icsk->icsk_ack.ato;
        unsigned long timeout;
 
+       tcp_ca_event(sk, CA_EVENT_DELAYED_ACK);
+
        if (ato > TCP_DELACK_MIN) {
                const struct tcp_sock *tp = tcp_sk(sk);
                int max_ato = HZ / 2;
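
Together with CA_EVENT_NON_DELAYED_ACK below, this lets a module track whether an ACK was held back before it went out. A sketch of the bookkeeping this enables, modelled on DCTCP; struct example_ca and its field are hypothetical:

struct example_ca {
	bool delayed_ack_reserved;	/* a delayed ACK is outstanding */
};

static void example_ack_event(struct sock *sk, enum tcp_ca_event ev)
{
	struct example_ca *ca = inet_csk_ca(sk);	/* private CC state */

	if (ev == CA_EVENT_DELAYED_ACK)
		ca->delayed_ack_reserved = true;
	else if (ev == CA_EVENT_NON_DELAYED_ACK)
		ca->delayed_ack_reserved = false;
}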
        if (sk->sk_state == TCP_CLOSE)
                return;
 
+       tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK);
+
        /* We are not putting this on the write queue, so
         * tcp_transmit_skb() will set the ownership to this
         * sock.