]> www.pilppa.org Git - linux-2.6-omap-h63xx.git/blob - fs/btrfs/free-space-cache.c
2e69b9c30437ca8c6705dc9a76e45fe788dd7394
[linux-2.6-omap-h63xx.git] / fs / btrfs / free-space-cache.c
1 /*
2  * Copyright (C) 2008 Red Hat.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18
19 #include <linux/sched.h>
20 #include "ctree.h"
21
22 static int tree_insert_offset(struct rb_root *root, u64 offset,
23                               struct rb_node *node)
24 {
25         struct rb_node **p = &root->rb_node;
26         struct rb_node *parent = NULL;
27         struct btrfs_free_space *info;
28
29         while (*p) {
30                 parent = *p;
31                 info = rb_entry(parent, struct btrfs_free_space, offset_index);
32
33                 if (offset < info->offset)
34                         p = &(*p)->rb_left;
35                 else if (offset > info->offset)
36                         p = &(*p)->rb_right;
37                 else
38                         return -EEXIST;
39         }
40
41         rb_link_node(node, parent, p);
42         rb_insert_color(node, root);
43
44         return 0;
45 }
46
47 static int tree_insert_bytes(struct rb_root *root, u64 bytes,
48                              struct rb_node *node)
49 {
50         struct rb_node **p = &root->rb_node;
51         struct rb_node *parent = NULL;
52         struct btrfs_free_space *info;
53
54         while (*p) {
55                 parent = *p;
56                 info = rb_entry(parent, struct btrfs_free_space, bytes_index);
57
58                 if (bytes < info->bytes)
59                         p = &(*p)->rb_left;
60                 else
61                         p = &(*p)->rb_right;
62         }
63
64         rb_link_node(node, parent, p);
65         rb_insert_color(node, root);
66
67         return 0;
68 }
69
/*
 * searches the tree for the given offset.  An entry whose range contains
 * the given offset is always returned.  Additionally, when contains is
 * not set, we will accept the lowest-offset free space that starts at or
 * after the given offset and is at least bytes long.
 */
static struct btrfs_free_space *tree_search_offset(struct rb_root *root,
                                                   u64 offset, u64 bytes,
                                                   int contains)
{
        struct rb_node *n = root->rb_node;
        struct btrfs_free_space *entry, *ret = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_free_space, offset_index);

                if (offset < entry->offset) {
                        /*
                         * Entry starts after @offset: only acceptable when
                         * the caller did not demand containment.  Remember
                         * the lowest-offset candidate that is big enough and
                         * keep descending left toward @offset.
                         */
                        if (!contains &&
                            (!ret || entry->offset < ret->offset) &&
                            (bytes <= entry->bytes))
                                ret = entry;
                        n = n->rb_left;
                } else if (offset > entry->offset) {
                        /*
                         * Entry starts before @offset: take it if its range
                         * reaches @offset and the whole entry is at least
                         * @bytes (note: the size check is on the full entry,
                         * not just the part at/after @offset).
                         */
                        if ((entry->offset + entry->bytes - 1) >= offset &&
                            bytes <= entry->bytes) {
                                ret = entry;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        /*
                         * Exact offset match; if it is too small, larger
                         * offsets live in the right subtree, keep looking.
                         */
                        if (bytes > entry->bytes) {
                                n = n->rb_right;
                                continue;
                        }
                        ret = entry;
                        break;
                }
        }

        return ret;
}
111
112 /*
113  * return a chunk at least bytes size, as close to offset that we can get.
114  */
static struct btrfs_free_space *tree_search_bytes(struct rb_root *root,
                                                  u64 offset, u64 bytes)
{
        struct rb_node *n = root->rb_node;
        struct btrfs_free_space *entry, *ret = NULL;

        while (n) {
                entry = rb_entry(n, struct btrfs_free_space, bytes_index);

                if (bytes < entry->bytes) {
                        /*
                         * We prefer to get a hole size as close to the size we
                         * are asking for so we don't take small slivers out of
                         * huge holes, but we also want to get as close to the
                         * offset as possible so we don't have a whole lot of
                         * fragmentation.
                         */
                        /* only consider entries at or past @offset */
                        if (offset <= entry->offset) {
                                if (!ret)
                                        ret = entry;
                                else if (entry->bytes < ret->bytes)
                                        ret = entry;
                                else if (entry->offset < ret->offset)
                                        ret = entry;
                        }
                        n = n->rb_left;
                } else if (bytes > entry->bytes) {
                        /* too small; bigger entries live in the right subtree */
                        n = n->rb_right;
                } else {
                        /*
                         * Ok we may have multiple chunks of the wanted size,
                         * so we don't want to take the first one we find, we
                         * want to take the one closest to our given offset, so
                         * keep searching just in case there's a better match.
                         */
                        n = n->rb_right;
                        if (offset > entry->offset)
                                continue;
                        else if (!ret || entry->offset < ret->offset)
                                ret = entry;
                }
        }

        return ret;
}
160
161 static void unlink_free_space(struct btrfs_block_group_cache *block_group,
162                               struct btrfs_free_space *info)
163 {
164         rb_erase(&info->offset_index, &block_group->free_space_offset);
165         rb_erase(&info->bytes_index, &block_group->free_space_bytes);
166 }
167
168 static int link_free_space(struct btrfs_block_group_cache *block_group,
169                            struct btrfs_free_space *info)
170 {
171         int ret = 0;
172
173
174         ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
175                                  &info->offset_index);
176         if (ret)
177                 return ret;
178
179         ret = tree_insert_bytes(&block_group->free_space_bytes, info->bytes,
180                                 &info->bytes_index);
181         if (ret)
182                 return ret;
183
184         return ret;
185 }
186
187 static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
188                                   u64 offset, u64 bytes)
189 {
190         struct btrfs_free_space *right_info;
191         struct btrfs_free_space *left_info;
192         struct btrfs_free_space *info = NULL;
193         struct btrfs_free_space *alloc_info;
194         int ret = 0;
195
196         alloc_info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
197         if (!alloc_info)
198                 return -ENOMEM;
199
200         /*
201          * first we want to see if there is free space adjacent to the range we
202          * are adding, if there is remove that struct and add a new one to
203          * cover the entire range
204          */
205         right_info = tree_search_offset(&block_group->free_space_offset,
206                                         offset+bytes, 0, 1);
207         left_info = tree_search_offset(&block_group->free_space_offset,
208                                        offset-1, 0, 1);
209
210         if (right_info && right_info->offset == offset+bytes) {
211                 unlink_free_space(block_group, right_info);
212                 info = right_info;
213                 info->offset = offset;
214                 info->bytes += bytes;
215         } else if (right_info && right_info->offset != offset+bytes) {
216                 printk(KERN_ERR "adding space in the middle of an existing "
217                        "free space area. existing: offset=%Lu, bytes=%Lu. "
218                        "new: offset=%Lu, bytes=%Lu\n", right_info->offset,
219                        right_info->bytes, offset, bytes);
220                 BUG();
221         }
222
223         if (left_info) {
224                 unlink_free_space(block_group, left_info);
225
226                 if (unlikely((left_info->offset + left_info->bytes) !=
227                              offset)) {
228                         printk(KERN_ERR "free space to the left of new free "
229                                "space isn't quite right. existing: offset=%Lu,"
230                                " bytes=%Lu. new: offset=%Lu, bytes=%Lu\n",
231                                left_info->offset, left_info->bytes, offset,
232                                bytes);
233                         BUG();
234                 }
235
236                 if (info) {
237                         info->offset = left_info->offset;
238                         info->bytes += left_info->bytes;
239                         kfree(left_info);
240                 } else {
241                         info = left_info;
242                         info->bytes += bytes;
243                 }
244         }
245
246         if (info) {
247                 ret = link_free_space(block_group, info);
248                 if (!ret)
249                         info = NULL;
250                 goto out;
251         }
252
253         info = alloc_info;
254         alloc_info = NULL;
255         info->offset = offset;
256         info->bytes = bytes;
257
258         ret = link_free_space(block_group, info);
259         if (ret)
260                 kfree(info);
261 out:
262         if (ret) {
263                 printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret);
264                 if (ret == -EEXIST)
265                         BUG();
266         }
267
268         if (alloc_info)
269                 kfree(alloc_info);
270
271         return ret;
272 }
273
/*
 * Remove the range [offset, offset + bytes) from @block_group's
 * free-space cache.
 *
 * Three cases are handled: the range starts exactly at a cached entry
 * (trim or delete the entry), the range falls strictly inside an entry
 * (split it — happens during tree log replay), or no entry matches
 * (warn only).  NOTE(review): caller presumably holds alloc_mutex, as
 * the locked wrapper does — confirm.
 *
 * Returns 0 on success, or -EINVAL if the entry at @offset is smaller
 * than @bytes.
 */
static int
__btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                          u64 offset, u64 bytes)
{
        struct btrfs_free_space *info;
        int ret = 0;

        /* find an entry starting at, or containing, @offset */
        info = tree_search_offset(&block_group->free_space_offset, offset, 0,
                                  1);

        if (info && info->offset == offset) {
                if (info->bytes < bytes) {
                        /* asked to remove more than the entry holds */
                        printk(KERN_ERR "Found free space at %Lu, size %Lu,"
                               "trying to use %Lu\n",
                               info->offset, info->bytes, bytes);
                        WARN_ON(1);
                        ret = -EINVAL;
                        goto out;
                }
                unlink_free_space(block_group, info);

                if (info->bytes == bytes) {
                        /* the range consumes the whole entry */
                        kfree(info);
                        goto out;
                }

                /* trim the front of the entry and re-index the remainder */
                info->offset += bytes;
                info->bytes -= bytes;

                ret = link_free_space(block_group, info);
                BUG_ON(ret);
        } else if (info && info->offset < offset &&
                   info->offset + info->bytes >= offset + bytes) {
                u64 old_start = info->offset;
                /*
                 * we're freeing space in the middle of the info,
                 * this can happen during tree log replay
                 *
                 * first unlink the old info and then
                 * insert it again after the hole we're creating
                 */
                unlink_free_space(block_group, info);
                if (offset + bytes < info->offset + info->bytes) {
                        u64 old_end = info->offset + info->bytes;

                        /* keep the tail that survives past the hole */
                        info->offset = offset + bytes;
                        info->bytes = old_end - info->offset;
                        ret = link_free_space(block_group, info);
                        BUG_ON(ret);
                } else {
                        /* the hole we're creating ends at the end
                         * of the info struct, just free the info
                         */
                        kfree(info);
                }

                /* step two, insert a new info struct to cover anything
                 * before the hole
                 */
                ret = __btrfs_add_free_space(block_group, old_start,
                                             offset - old_start);
                BUG_ON(ret);
        } else {
                /* no cached entry covers the requested range */
                WARN_ON(1);
        }
out:
        return ret;
}
342
343 int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
344                          u64 offset, u64 bytes)
345 {
346         int ret;
347         struct btrfs_free_space *sp;
348
349         mutex_lock(&block_group->alloc_mutex);
350         ret = __btrfs_add_free_space(block_group, offset, bytes);
351         sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
352         BUG_ON(!sp);
353         mutex_unlock(&block_group->alloc_mutex);
354
355         return ret;
356 }
357
358 int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group,
359                               u64 offset, u64 bytes)
360 {
361         int ret;
362         struct btrfs_free_space *sp;
363
364         ret = __btrfs_add_free_space(block_group, offset, bytes);
365         sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
366         BUG_ON(!sp);
367
368         return ret;
369 }
370
371 int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
372                             u64 offset, u64 bytes)
373 {
374         int ret = 0;
375
376         mutex_lock(&block_group->alloc_mutex);
377         ret = __btrfs_remove_free_space(block_group, offset, bytes);
378         mutex_unlock(&block_group->alloc_mutex);
379
380         return ret;
381 }
382
383 int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group,
384                                  u64 offset, u64 bytes)
385 {
386         int ret;
387
388         ret = __btrfs_remove_free_space(block_group, offset, bytes);
389
390         return ret;
391 }
392
393 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
394                            u64 bytes)
395 {
396         struct btrfs_free_space *info;
397         struct rb_node *n;
398         int count = 0;
399
400         for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) {
401                 info = rb_entry(n, struct btrfs_free_space, offset_index);
402                 if (info->bytes >= bytes)
403                         count++;
404                 //printk(KERN_INFO "offset=%Lu, bytes=%Lu\n", info->offset,
405                 //       info->bytes);
406         }
407         printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
408                "\n", count);
409 }
410
411 u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group)
412 {
413         struct btrfs_free_space *info;
414         struct rb_node *n;
415         u64 ret = 0;
416
417         for (n = rb_first(&block_group->free_space_offset); n;
418              n = rb_next(n)) {
419                 info = rb_entry(n, struct btrfs_free_space, offset_index);
420                 ret += info->bytes;
421         }
422
423         return ret;
424 }
425
/*
 * Free every cached free-space entry for @block_group.
 *
 * Takes alloc_mutex and periodically drops it around cond_resched() so
 * tearing down a large cache does not hog the CPU; the tree is re-read
 * from rb_last() on each iteration, so the lock gap is safe.
 */
void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space *info;
        struct rb_node *node;

        mutex_lock(&block_group->alloc_mutex);
        while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
                info = rb_entry(node, struct btrfs_free_space, bytes_index);
                unlink_free_space(block_group, info);
                kfree(info);
                /* drop the mutex while rescheduling to avoid long stalls */
                if (need_resched()) {
                        mutex_unlock(&block_group->alloc_mutex);
                        cond_resched();
                        mutex_lock(&block_group->alloc_mutex);
                }
        }
        mutex_unlock(&block_group->alloc_mutex);
}
444
#if 0
/*
 * NOTE(review): dead code — compiled out and unused; these locked
 * search helpers are kept only for reference.
 */
static struct btrfs_free_space *btrfs_find_free_space_offset(struct
                                                      btrfs_block_group_cache
                                                      *block_group, u64 offset,
                                                      u64 bytes)
{
        struct btrfs_free_space *ret;

        /* serialize against concurrent cache updates */
        mutex_lock(&block_group->alloc_mutex);
        ret = tree_search_offset(&block_group->free_space_offset, offset,
                                 bytes, 0);
        mutex_unlock(&block_group->alloc_mutex);

        return ret;
}

static struct btrfs_free_space *btrfs_find_free_space_bytes(struct
                                                     btrfs_block_group_cache
                                                     *block_group, u64 offset,
                                                     u64 bytes)
{
        struct btrfs_free_space *ret;

        mutex_lock(&block_group->alloc_mutex);

        /* best-fit search keyed by size */
        ret = tree_search_bytes(&block_group->free_space_bytes, offset, bytes);
        mutex_unlock(&block_group->alloc_mutex);

        return ret;
}
#endif
476
477 struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
478                                                *block_group, u64 offset,
479                                                u64 bytes)
480 {
481         struct btrfs_free_space *ret = NULL;
482
483         ret = tree_search_offset(&block_group->free_space_offset, offset,
484                                  bytes, 0);
485         if (!ret)
486                 ret = tree_search_bytes(&block_group->free_space_bytes,
487                                         offset, bytes);
488
489         return ret;
490 }