comparison src/lib-index/mail-cache-transaction.c @ 7138:876c7bca351c HEAD

Link cache records together directly when writing the new records, instead of delaying the linking until later and causing lots of small writes. The delayed check still exists and does the writes when required, but normally this shouldn't happen.
author Timo Sirainen <tss@iki.fi>
date Thu, 10 Jan 2008 04:23:23 +0200
parents f0ad529ac9ea
children 24266b9a6c55
diff -r c33c87781ab4 -r 876c7bca351c src/lib-index/mail-cache-transaction.c
--- a/src/lib-index/mail-cache-transaction.c
+++ b/src/lib-index/mail-cache-transaction.c
@@ -671,17 +671,27 @@
 mail_cache_transaction_switch_seq(struct mail_cache_transaction_ctx *ctx)
 {
 	struct mail_cache_record *rec, new_rec;
 	void *data;
 	size_t size;
+	uint32_t reset_id;
 
 	if (ctx->prev_seq != 0) {
 		/* fix record size */
 		data = buffer_get_modifiable_data(ctx->cache_data, &size);
 		rec = PTR_OFFSET(data, ctx->prev_pos);
 		rec->size = size - ctx->prev_pos;
 		i_assert(rec->size > sizeof(*rec));
+
+		/* set prev_offset if possible */
+		rec->prev_offset =
+			mail_cache_lookup_cur_offset(ctx->view->view,
+						     ctx->prev_seq, &reset_id);
+		if (reset_id != ctx->cache->hdr->file_seq)
+			rec->prev_offset = 0;
+		else
+			ctx->cache->hdr_copy.continued_record_count++;
 
 		array_append(&ctx->cache_data_seq, &ctx->prev_seq, 1);
 		ctx->prev_pos = size;
 	} else if (ctx->cache_data == NULL) {
 		ctx->cache_data =
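With this change, prev_offset is filled in at write time, so a message's cache records form a backwards-linked chain inside the cache file. As a rough illustration (not part of this changeset), a reader would follow the chain roughly like this; CACHE_RECORD() and the prev_offset field come from the code in this diff, while latest_offset is a hypothetical starting point:

	/* Sketch: walk a message's cache records from newest to oldest.
	   prev_offset == 0 terminates the chain. The chain is only valid
	   while reset_id matches the current file_seq, which the code
	   above checks before setting prev_offset. */
	const struct mail_cache_record *rec;
	uint32_t offset = latest_offset; /* hypothetical: newest record */

	while (offset != 0) {
		rec = CACHE_RECORD(cache, offset);
		/* ...process the fields stored in this record... */
		offset = rec->prev_offset;
	}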
@@ -1023,21 +1033,39 @@
 }
 
 int mail_cache_link(struct mail_cache *cache, uint32_t old_offset,
 		    uint32_t new_offset)
 {
+	const struct mail_cache_record *rec;
+
 	i_assert(cache->locked);
 
 	if (MAIL_CACHE_IS_UNUSABLE(cache))
 		return -1;
 
-	if (new_offset + sizeof(struct mail_cache_record) >
-	    cache->hdr_copy.used_file_size) {
+	/* this function is called for each added cache record (or cache
+	   extension record update actually) with new_offset pointing to the
+	   new record and old_offset pointing to the previous record.
+
+	   we want to keep the old and new records linked so both old and new
+	   cached data is found. normally they are already linked correctly.
+	   the problem only comes when multiple processes are adding cache
+	   records at the same time. we'd rather not lose those additions, so
+	   force the linking order to be new_offset -> old_offset if it isn't
+	   already. */
+	if (mail_cache_map(cache, new_offset, sizeof(*rec)) < 0)
+		return -1;
+	if (new_offset + sizeof(*rec) > cache->mmap_length) {
 		mail_cache_set_corrupted(cache,
 			"Cache record offset %u points outside file",
 			new_offset);
 		return -1;
+	}
+	rec = CACHE_RECORD(cache, new_offset);
+	if (rec->prev_offset == old_offset) {
+		/* link is already correct */
+		return 0;
 	}
 
 	if (mail_cache_link_unlocked(cache, old_offset, new_offset) < 0)
 		return -1;
 
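Since the writer now usually sets prev_offset itself, mail_cache_link() mostly just verifies the link and returns early without writing; the fallback write is only needed when another process appended a record in between. A hedged usage sketch, where mail_cache_link() is the real function from this file but get_current_offset() and append_record() are made-up helpers for illustration:

	/* Sketch: after appending a record, make sure it is linked to the
	   message's previous record. Normally the early-return path hits
	   and no extra write happens. */
	uint32_t old_offset, new_offset;

	old_offset = get_current_offset(cache, seq);   /* hypothetical */
	new_offset = append_record(cache, data, size); /* hypothetical */

	if (old_offset != 0) {
		if (mail_cache_link(cache, old_offset, new_offset) < 0)
			return -1; /* unusable or corrupted cache file */
	}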