unsigned long zp;
unsigned long i;
unsigned long flags;
- gfp_t mask = GFP_KERNEL | __GFP_ZERO;
map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL);
if (!map)
	return NULL;
init_waitqueue_head(&map->m_waitq);
INIT_LIST_HEAD(&map->m_conn_list);
- zp = __get_free_pages(mask, get_order(RDS_CONG_MAP_BYTES));
- if (zp == 0)
- goto out;
-
- for (i = 0; i < RDS_CONG_MAP_PAGES; i++)
- map->m_page_addrs[i] = zp + i * RDS_CONG_PAGE_SIZE;
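+ /* Build the map from individually allocated zeroed pages instead of
+  * one high-order allocation covering all of RDS_CONG_MAP_BYTES. */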
+ for (i = 0; i < RDS_CONG_MAP_PAGES; i++) {
+ zp = get_zeroed_page(GFP_KERNEL);
+ if (zp == 0)
+ goto out;
+ map->m_page_addrs[i] = zp;
+ }
spin_lock_irqsave(&rds_cong_lock, flags);
ret = rds_cong_tree_walk(addr, map);
spin_unlock_irqrestore(&rds_cong_lock, flags);
out:
if (map) {
- if (zp)
- __free_pages(virt_to_page(map->m_page_addrs[0]),
- get_order(RDS_CONG_MAP_BYTES));
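+ /* map was kzalloc()ed, so unused m_page_addrs slots are still zero and
+  * only the pages that were actually allocated get freed here. */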
+ for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
+ free_page(map->m_page_addrs[i]);
kfree(map);
}
static struct rds_message *rds_cong_map_pages(unsigned long *page_addrs, unsigned int total_len)
{
struct rds_message *rm;
- int num_sgs = RDS_CONG_MAP_SGE;
+ unsigned int i;
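+ /* One scatterlist entry per RDS_CONG_PAGE_SIZE chunk of the map. */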
+ int num_sgs = ceil(total_len, RDS_CONG_PAGE_SIZE);
int extra_bytes = num_sgs * sizeof(struct scatterlist);
rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
if (!rm)
	return ERR_PTR(-ENOMEM);
set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
- rm->data.op_nents = RDS_CONG_MAP_SGE;
+ rm->data.op_nents = num_sgs;
rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
- sg_set_page(&rm->data.op_sg[0], virt_to_page(page_addrs[0]),
- total_len, 0);
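+ /* The congestion map is made up of whole pages, so every scatterlist
+  * entry covers exactly one RDS_CONG_PAGE_SIZE page. */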
+ for (i = 0; i < rm->data.op_nents; i++) {
+ sg_set_page(&rm->data.op_sg[i],
+ virt_to_page(page_addrs[i]),
+ RDS_CONG_PAGE_SIZE, 0);
+ }
+
return rm;
}
{
struct rb_node *node;
struct rds_cong_map *map;
+ unsigned long i;
while ((node = rb_first(&rds_cong_tree))) {
map = rb_entry(node, struct rds_cong_map, m_rb_node);
rdsdebug("freeing map %p\n", map);
rb_erase(&map->m_rb_node, &rds_cong_tree);
- if (map->m_page_addrs[0])
- __free_pages(virt_to_page(map->m_page_addrs[0]),
- get_order(RDS_CONG_MAP_BYTES));
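+ /* Free each page backing the map individually; the map is no longer
+  * one contiguous high-order allocation. */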
+ for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
+ free_page(map->m_page_addrs[i]);
kfree(map);
}
}
unsigned int map_off;
unsigned int map_page;
struct rds_page_frag *frag;
+ struct scatterlist *sg;
unsigned long frag_off;
unsigned long to_copy;
unsigned long copied;
frag_off = 0;
copied = 0;
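+ /* Start at the first scatterlist entry of the current fragment. */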
+ sg = frag->f_sg;
while (copied < RDS_CONG_MAP_BYTES) {
uint64_t *src, *dst;
unsigned int k;
- to_copy = min(ic->i_frag_sz - frag_off, RDS_CONG_PAGE_SIZE - map_off);
+ to_copy = min(sg->length - frag_off, RDS_CONG_PAGE_SIZE - map_off);
BUG_ON(to_copy & 7); /* Must be 64bit aligned. */
- addr = kmap_atomic(sg_page(frag->f_sg));
+ addr = kmap_atomic(sg_page(sg));
- src = addr + frag->f_sg[0].offset + frag_off;
+ src = addr + sg->offset + frag_off;
dst = (void *)map->m_page_addrs[map_page] + map_off;
for (k = 0; k < to_copy; k += 8) {
/* Record ports that became uncongested, ie
 * bits that changed from 1 to 0. */
frag = list_entry(frag->f_item.next,
struct rds_page_frag, f_item);
frag_off = 0;
+ sg = frag->f_sg;
+ }
+
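+ /* The current scatterlist entry is fully consumed; move to the next
+  * entry within this fragment. */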
+ if (frag_off == sg->length) {
+ frag_off = 0;
+ sg = sg_next(sg);
}
}