[DRBD-cvs] svn commit by lars - r2993 - in
branches/drbd-8.0-for-linus/drbd: . linux -
eae4495f2ea3c2fa62fb1b8824e950d17471528b fix spaces ->
drbd-cvs at lists.linbit.com
drbd-cvs at lists.linbit.com
Tue Jul 24 15:25:51 CEST 2007
Author: lars
Date: 2007-07-24 15:25:49 +0200 (Tue, 24 Jul 2007)
New Revision: 2993
Modified:
branches/drbd-8.0-for-linus/drbd/drbd_actlog.c
branches/drbd-8.0-for-linus/drbd/drbd_bitmap.c
branches/drbd-8.0-for-linus/drbd/drbd_int.h
branches/drbd-8.0-for-linus/drbd/drbd_main.c
branches/drbd-8.0-for-linus/drbd/drbd_nl.c
branches/drbd-8.0-for-linus/drbd/drbd_receiver.c
branches/drbd-8.0-for-linus/drbd/drbd_req.c
branches/drbd-8.0-for-linus/drbd/drbd_strings.c
branches/drbd-8.0-for-linus/drbd/drbd_worker.c
branches/drbd-8.0-for-linus/drbd/linux/drbd_nl.h
branches/drbd-8.0-for-linus/drbd/lru_cache.c
branches/drbd-8.0-for-linus/drbd/lru_cache.h
Log:
eae4495f2ea3c2fa62fb1b8824e950d17471528b fix spaces -> tabs; fix trailing whitespace
b4d0b6bd635e16e9a468fe512eea23c4b9e47381 and more single whitespace fixes
Modified: branches/drbd-8.0-for-linus/drbd/drbd_actlog.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_actlog.c 2007-07-24 13:20:54 UTC (rev 2992)
+++ branches/drbd-8.0-for-linus/drbd/drbd_actlog.c 2007-07-24 13:25:49 UTC (rev 2993)
@@ -226,7 +226,7 @@
MTRACE(TraceTypeALExts, TraceLvlMetrics,
INFO("al_begin_io( sec=%llus (al_enr=%u) (rs_enr=%d) )\n",
- (unsigned long long) sector, enr,
+ (unsigned long long) sector, enr,
(int)BM_SECT_TO_EXT(sector));
);
@@ -274,7 +274,7 @@
MTRACE(TraceTypeALExts, TraceLvlMetrics,
INFO("al_complete_io( sec=%llus (al_enr=%u) (rs_enr=%d) )\n",
- (unsigned long long) sector, enr,
+ (unsigned long long) sector, enr,
(int)BM_SECT_TO_EXT(sector));
);
@@ -325,14 +325,14 @@
mx = min_t(int, AL_EXTENTS_PT,
mdev->act_log->nr_elements - mdev->al_tr_cycle);
- for(i = 0;i < mx;i++) {
+ for (i = 0; i < mx; i++) {
extent_nr = lc_entry(mdev->act_log,
mdev->al_tr_cycle+i)->lc_number;
buffer->updates[i+1].pos = cpu_to_be32(mdev->al_tr_cycle+i);
buffer->updates[i+1].extent = cpu_to_be32(extent_nr);
xor_sum ^= extent_nr;
}
- for(;i < AL_EXTENTS_PT;i++) {
+ for (; i < AL_EXTENTS_PT; i++) {
buffer->updates[i+1].pos = __constant_cpu_to_be32(-1);
buffer->updates[i+1].extent = __constant_cpu_to_be32(LC_FREE);
xor_sum ^= LC_FREE;
@@ -386,7 +386,7 @@
rv = ( be32_to_cpu(b->magic) == DRBD_MAGIC );
- for(i = 0;i < AL_EXTENTS_PT+1;i++)
+ for (i = 0; i < AL_EXTENTS_PT+1; i++)
xor_sum ^= be32_to_cpu(b->updates[i].extent);
rv &= (xor_sum == be32_to_cpu(b->xor_sum));
@@ -416,7 +416,7 @@
buffer = page_address(mdev->md_io_page);
/* Find the valid transaction in the log */
- for(i = 0;i <= mx;i++) {
+ for (i = 0; i <= mx; i++) {
rv = drbd_al_read_tr(mdev, bdev, buffer, i);
if (rv == 0) continue;
if (rv == -1) {
@@ -448,7 +448,7 @@
/* Read the valid transactions.
* INFO("Reading from %d to %d.\n",from,to); */
i = from;
- while(1) {
+ while (1) {
int j, pos;
unsigned int extent_nr;
unsigned int trn;
@@ -468,7 +468,7 @@
elements there might be an old version of the
updated element (in slot 0). So the element in slot 0
can overwrite old versions. */
- for(j = AL_EXTENTS_PT;j >= 0;j--) {
+ for (j = AL_EXTENTS_PT; j >= 0; j--) {
pos = be32_to_cpu(buffer->updates[j].pos);
extent_nr = be32_to_cpu(buffer->updates[j].extent);
@@ -556,7 +556,7 @@
int offset;
/* check if that enr is already covered by an already created bio. */
- while( (bio = bios[i]) ) {
+ while ( (bio = bios[i]) ) {
if (bio->bi_sector == on_disk_sector) return 0;
i++;
}
@@ -579,7 +579,7 @@
}
offset = S2W(enr);
- drbd_bm_get_lel( mdev, offset,
+ drbd_bm_get_lel( mdev, offset,
min_t(size_t, S2W(1), drbd_bm_words(mdev) - offset),
kmap(*page) + *page_offset );
kunmap(*page);
@@ -635,7 +635,7 @@
wc.mdev = mdev;
wc.error = 0;
- for(i = 0;i < nr_elements;i++) {
+ for (i = 0; i < nr_elements; i++) {
enr = lc_entry(mdev->act_log, i)->lc_number;
if (enr == LC_FREE) continue;
/* next statement also does atomic_inc wc.count */
@@ -651,7 +651,7 @@
wake_up(&mdev->al_wait);
/* all prepared, submit them */
- for(i = 0;i < nr_elements;i++) {
+ for (i = 0; i < nr_elements; i++) {
if (bios[i] == NULL) break;
if (FAULT_ACTIVE( mdev, DRBD_FAULT_MD_WR )) {
bios[i]->bi_rw = WRITE;
@@ -680,7 +680,7 @@
free_bios_submit_one_by_one:
/* free everything by calling the endio callback directly. */
- for(i = 0;i < nr_elements;i++) {
+ for (i = 0; i < nr_elements; i++) {
if (bios[i] == NULL) break;
bios[i]->bi_size = 0;
atodb_endio(bios[i], MD_HARDSECT, 0);
@@ -690,7 +690,7 @@
submit_one_by_one:
WARN("Using the slow drbd_al_to_on_disk_bm()\n");
- for(i = 0;i < mdev->act_log->nr_elements;i++) {
+ for (i = 0; i < mdev->act_log->nr_elements; i++) {
enr = lc_entry(mdev->act_log, i)->lc_number;
if (enr == LC_FREE) continue;
/* Really slow: if we have al-extents 16..19 active,
@@ -716,7 +716,7 @@
wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
- for(i = 0;i < mdev->act_log->nr_elements;i++) {
+ for (i = 0; i < mdev->act_log->nr_elements; i++) {
enr = lc_entry(mdev->act_log, i)->lc_number;
if (enr == LC_FREE) continue;
add += drbd_bm_ALe_set_all(mdev, enr);
@@ -755,7 +755,7 @@
D_ASSERT( test_bit(__LC_DIRTY, &mdev->act_log->flags) );
- for(i = 0;i < mdev->act_log->nr_elements;i++) {
+ for (i = 0; i < mdev->act_log->nr_elements; i++) {
al_ext = lc_entry(mdev->act_log, i);
if (al_ext->lc_number == LC_FREE) continue;
wait_event(mdev->al_wait, _try_lc_del(mdev, al_ext));
@@ -930,7 +930,7 @@
* we count rs_{total,left} in bits, not sectors.
*/
spin_lock_irqsave(&mdev->al_lock, flags);
- for(bnr = sbnr; bnr <= ebnr; bnr++) {
+ for (bnr = sbnr; bnr <= ebnr; bnr++) {
if (drbd_bm_clear_bit(mdev, bnr)) count++;
}
if (count) {
@@ -1051,7 +1051,7 @@
return bm_ext;
}
-static inline int _is_in_al(drbd_dev* mdev, unsigned int enr)
+static inline int _is_in_al(drbd_dev *mdev, unsigned int enr)
{
struct lc_element *al_ext;
int rv = 0;
@@ -1084,7 +1084,7 @@
* returns 1 if successful.
* returns 0 if interrupted.
*/
-int drbd_rs_begin_io(drbd_dev* mdev, sector_t sector)
+int drbd_rs_begin_io(drbd_dev *mdev, sector_t sector)
{
unsigned int enr = BM_SECT_TO_EXT(sector);
struct bm_extent *bm_ext;
@@ -1101,7 +1101,7 @@
if (test_bit(BME_LOCKED, &bm_ext->flags)) return 1;
- for(i = 0;i < AL_EXT_PER_BM_SECT;i++) {
+ for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
sig = wait_event_interruptible( mdev->al_wait,
!_is_in_al(mdev, enr*AL_EXT_PER_BM_SECT+i) );
if (sig) {
@@ -1131,7 +1131,7 @@
* returns zero if we could set BME_LOCKED and can proceed,
* -EAGAIN if we need to try again.
*/
-int drbd_try_rs_begin_io(drbd_dev* mdev, sector_t sector)
+int drbd_try_rs_begin_io(drbd_dev *mdev, sector_t sector)
{
unsigned int enr = BM_SECT_TO_EXT(sector);
const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT;
@@ -1220,7 +1220,7 @@
MTRACE(TraceTypeResync, TraceLvlAll,
INFO("checking al for %u\n", enr);
);
- for (i = 0;i < AL_EXT_PER_BM_SECT;i++) {
+ for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
if (unlikely(al_enr+i == mdev->act_log->new_number))
goto try_again;
if (lc_is_used(mdev->act_log, al_enr+i))
@@ -1241,7 +1241,7 @@
return -EAGAIN;
}
-void drbd_rs_complete_io(drbd_dev* mdev, sector_t sector)
+void drbd_rs_complete_io(drbd_dev *mdev, sector_t sector)
{
unsigned int enr = BM_SECT_TO_EXT(sector);
struct bm_extent *bm_ext;
@@ -1281,7 +1281,7 @@
* drbd_rs_cancel_all: Removes extents from the resync LRU. Even
* if they are BME_LOCKED.
*/
-void drbd_rs_cancel_all(drbd_dev* mdev)
+void drbd_rs_cancel_all(drbd_dev *mdev)
{
struct bm_extent *bm_ext;
int i;
@@ -1294,7 +1294,7 @@
if (inc_local_if_state(mdev, Failed)) {
/* ok, ->resync is there. */
- for(i = 0;i < mdev->resync->nr_elements;i++) {
+ for (i = 0; i < mdev->resync->nr_elements; i++) {
bm_ext = (struct bm_extent *) lc_entry(mdev->resync, i);
if (bm_ext->lce.lc_number == LC_FREE) continue;
bm_ext->lce.refcnt = 0; /* Rude but ok. */
@@ -1318,7 +1318,7 @@
* returns -EAGAIN.
* In case all elements got removed it returns zero.
*/
-int drbd_rs_del_all(drbd_dev* mdev)
+int drbd_rs_del_all(drbd_dev *mdev)
{
struct bm_extent *bm_ext;
int i;
@@ -1331,7 +1331,7 @@
if (inc_local_if_state(mdev, Failed)) {
/* ok, ->resync is there. */
- for(i = 0;i < mdev->resync->nr_elements;i++) {
+ for (i = 0; i < mdev->resync->nr_elements; i++) {
bm_ext = (struct bm_extent *) lc_entry(mdev->resync, i);
if (bm_ext->lce.lc_number == LC_FREE) continue;
if (bm_ext->lce.lc_number == mdev->resync_wenr) {
@@ -1369,7 +1369,7 @@
* called on SyncTarget when resync write fails or NegRSDReply received
*
*/
-void drbd_rs_failed_io(drbd_dev* mdev, sector_t sector, int size)
+void drbd_rs_failed_io(drbd_dev *mdev, sector_t sector, int size)
{
/* Is called from worker and receiver context _only_ */
unsigned long sbnr, ebnr, lbnr, bnr;
@@ -1413,8 +1413,8 @@
* we count rs_{total,left} in bits, not sectors.
*/
spin_lock_irq(&mdev->al_lock);
- for(bnr = sbnr; bnr <= ebnr; bnr++) {
- if (drbd_bm_test_bit(mdev, bnr) > 0) count++;
+ for (bnr = sbnr; bnr <= ebnr; bnr++) {
+ if (drbd_bm_test_bit(mdev, bnr) > 0) count++;
}
if (count) {
mdev->rs_failed += count;
Modified: branches/drbd-8.0-for-linus/drbd/drbd_bitmap.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_bitmap.c 2007-07-24 13:20:54 UTC (rev 2992)
+++ branches/drbd-8.0-for-linus/drbd/drbd_bitmap.c 2007-07-24 13:25:49 UTC (rev 2993)
@@ -352,7 +352,7 @@
spin_unlock_irq(&b->bm_lock);
goto out;
} else {
- /* one extra long to catch off by one errors */
+ /* one extra long to catch off by one errors */
bytes = (words+1)*sizeof(long);
nbm = vmalloc(bytes);
if (!nbm) {
@@ -446,7 +446,7 @@
spin_lock_irq(&b->bm_lock);
bm = b->bm + offset;
- while(n--) {
+ while (n--) {
bits = hweight_long(*bm);
word = *bm | lel_to_cpu(*buffer++);
*bm++ = word;
@@ -484,7 +484,7 @@
spin_lock_irq(&b->bm_lock);
bm = b->bm + offset;
- while(n--) {
+ while (n--) {
bits = hweight_long(*bm);
word = lel_to_cpu(*buffer++);
*bm++ = word;
@@ -528,7 +528,7 @@
spin_lock_irq(&b->bm_lock);
bm = b->bm + offset;
- while(number--) *buffer++ = cpu_to_lel(*bm++);
+ while (number--) *buffer++ = cpu_to_lel(*bm++);
spin_unlock_irq(&b->bm_lock);
}
@@ -902,9 +902,9 @@
* only called from drbd_set_out_of_sync.
* strange_state blubber is already in place there...
strange_state = ( mdev->cstate > Connected ) ||
- ( mdev->cstate == Connected &&
- !(test_bit(DISKLESS,&mdev->flags) ||
- test_bit(PARTNER_DISKLESS,&mdev->flags)) );
+ ( mdev->cstate == Connected &&
+ !(test_bit(DISKLESS,&mdev->flags) ||
+ test_bit(PARTNER_DISKLESS,&mdev->flags)) );
if (strange_state)
ERR("%s in drbd_bm_set_bit\n", conns_to_name(mdev->cstate));
*/
Modified: branches/drbd-8.0-for-linus/drbd/drbd_int.h
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_int.h 2007-07-24 13:20:54 UTC (rev 2992)
+++ branches/drbd-8.0-for-linus/drbd/drbd_int.h 2007-07-24 13:25:49 UTC (rev 2993)
@@ -181,7 +181,7 @@
#ifdef DBG_ASSERTS
-extern void drbd_assert_breakpoint(drbd_dev*, char *, char *, int );
+extern void drbd_assert_breakpoint(drbd_dev *, char *, char *, int );
# define D_ASSERT(exp) if (!(exp)) \
drbd_assert_breakpoint(mdev, #exp, __FILE__, __LINE__)
#else
@@ -598,7 +598,7 @@
* drbd_request and Tl_epoch_entry are descendants of drbd_work.
*/
struct drbd_work;
-typedef int (*drbd_work_cb)(drbd_dev*, struct drbd_work*, int cancel);
+typedef int (*drbd_work_cb)(drbd_dev *, struct drbd_work *, int cancel);
struct drbd_work {
struct list_head list;
drbd_work_cb cb;
@@ -934,14 +934,14 @@
ScheduleAfter = 4,
};
-extern int drbd_change_state(drbd_dev* mdev, enum chg_state_flags f,
+extern int drbd_change_state(drbd_dev *mdev, enum chg_state_flags f,
drbd_state_t mask, drbd_state_t val);
-extern void drbd_force_state(drbd_dev*, drbd_state_t, drbd_state_t);
-extern int _drbd_request_state(drbd_dev*, drbd_state_t, drbd_state_t,
+extern void drbd_force_state(drbd_dev *, drbd_state_t, drbd_state_t);
+extern int _drbd_request_state(drbd_dev *, drbd_state_t, drbd_state_t,
enum chg_state_flags);
-extern int _drbd_set_state(drbd_dev*, drbd_state_t, enum chg_state_flags );
-extern void print_st_err(drbd_dev*, drbd_state_t, drbd_state_t, int );
-extern void after_state_ch(drbd_dev* mdev, drbd_state_t os, drbd_state_t ns,
+extern int _drbd_set_state(drbd_dev *, drbd_state_t, enum chg_state_flags );
+extern void print_st_err(drbd_dev *, drbd_state_t, drbd_state_t, int );
+extern void after_state_ch(drbd_dev *mdev, drbd_state_t os, drbd_state_t ns,
enum chg_state_flags);
extern int drbd_thread_start(struct Drbd_thread *thi);
extern void _drbd_thread_stop(struct Drbd_thread *thi, int restart, int wait);
@@ -989,7 +989,7 @@
extern int _drbd_send_bitmap(drbd_dev *mdev);
extern int drbd_send_sr_reply(drbd_dev *mdev, int retcode);
extern void drbd_free_bc(struct drbd_backing_dev *bc);
-extern int drbd_io_error(drbd_dev* mdev, int forcedetach);
+extern int drbd_io_error(drbd_dev *mdev, int forcedetach);
extern void drbd_mdev_cleanup(drbd_dev *mdev);
/* drbd_meta-data.c (still in drbd_main.c) */
@@ -1301,8 +1301,8 @@
/* drbd_nl.c */
extern char *ppsize(char *buf, unsigned long long size);
-extern sector_t drbd_new_dev_size(struct Drbd_Conf*, struct drbd_backing_dev*);
-extern int drbd_determin_dev_size(drbd_dev*);
+extern sector_t drbd_new_dev_size(struct Drbd_Conf *, struct drbd_backing_dev *);
+extern int drbd_determin_dev_size(drbd_dev *);
extern void drbd_setup_queue_param(drbd_dev *mdev, unsigned int);
extern int drbd_set_role(drbd_dev *mdev, drbd_role_t new_role, int force);
extern int drbd_ioctl(struct inode *inode, struct file *file,
@@ -1315,8 +1315,8 @@
extern int drbd_worker(struct Drbd_thread *thi);
extern void drbd_alter_sa(drbd_dev *mdev, int na);
extern void drbd_start_resync(drbd_dev *mdev, drbd_conns_t side);
-extern void resume_next_sg(drbd_dev* mdev);
-extern void suspend_other_sg(drbd_dev* mdev);
+extern void resume_next_sg(drbd_dev *mdev);
+extern void suspend_other_sg(drbd_dev *mdev);
extern int drbd_resync_finished(drbd_dev *mdev);
/* maybe rather drbd_main.c ? */
extern int drbd_md_sync_page_io(drbd_dev *mdev, struct drbd_backing_dev *bdev,
@@ -1392,9 +1392,9 @@
extern void drbd_rs_complete_io(struct Drbd_Conf *mdev, sector_t sector);
extern int drbd_rs_begin_io(struct Drbd_Conf *mdev, sector_t sector);
extern int drbd_try_rs_begin_io(struct Drbd_Conf *mdev, sector_t sector);
-extern void drbd_rs_cancel_all(drbd_dev* mdev);
-extern int drbd_rs_del_all(drbd_dev* mdev);
-extern void drbd_rs_failed_io(drbd_dev* mdev, sector_t sector, int size);
+extern void drbd_rs_cancel_all(drbd_dev *mdev);
+extern int drbd_rs_del_all(drbd_dev *mdev);
+extern void drbd_rs_failed_io(drbd_dev *mdev, sector_t sector, int size);
extern int drbd_al_read_log(struct Drbd_Conf *mdev, struct drbd_backing_dev *);
extern void __drbd_set_in_sync(drbd_dev *mdev, sector_t sector, int size, const char *file, const unsigned int line);
#define drbd_set_in_sync(mdev, sector, size) \
@@ -1424,26 +1424,26 @@
#define user_isp_mask 1
#define aftr_isp_mask 1
-#define NS(T, S) ({drbd_state_t mask; mask.i = 0; mask.T = T##_mask; mask;}), \
- ({drbd_state_t val; val.i = 0; val.T = (S); val;})
+#define NS(T, S) ({drbd_state_t mask; mask.i = 0; mask.T = T##_mask; mask; }), \
+ ({drbd_state_t val; val.i = 0; val.T = (S); val; })
#define NS2(T1, S1, T2, S2) \
({drbd_state_t mask; mask.i = 0; mask.T1 = T1##_mask; \
- mask.T2 = T2##_mask; mask;}), \
+ mask.T2 = T2##_mask; mask; }), \
({drbd_state_t val; val.i = 0; val.T1 = (S1); \
- val.T2 = (S2); val;})
+ val.T2 = (S2); val; })
#define NS3(T1, S1, T2, S2, T3, S3) \
({drbd_state_t mask; mask.i = 0; mask.T1 = T1##_mask; \
- mask.T2 = T2##_mask; mask.T3 = T3##_mask; mask;}), \
+ mask.T2 = T2##_mask; mask.T3 = T3##_mask; mask; }), \
({drbd_state_t val; val.i = 0; val.T1 = (S1); \
- val.T2 = (S2); val.T3 = (S3); val;})
+ val.T2 = (S2); val.T3 = (S3); val; })
-#define _NS(D, T, S) D, ({drbd_state_t ns; ns.i = D->state.i; ns.T = (S); ns;})
+#define _NS(D, T, S) D, ({drbd_state_t ns; ns.i = D->state.i; ns.T = (S); ns; })
#define _NS2(D, T1, S1, T2, S2) \
D, ({drbd_state_t ns; ns.i = D->state.i; ns.T1 = (S1); \
- ns.T2 = (S2); ns;})
+ ns.T2 = (S2); ns; })
#define _NS3(D, T1, S1, T2, S2, T3, S3) \
D, ({drbd_state_t ns; ns.i = D->state.i; ns.T1 = (S1); \
- ns.T2 = (S2); ns.T3 = (S3); ns;})
+ ns.T2 = (S2); ns.T3 = (S3); ns; })
static inline void drbd_state_lock(drbd_dev *mdev)
{
@@ -1457,7 +1457,7 @@
wake_up(&mdev->misc_wait);
}
-static inline int drbd_request_state(drbd_dev* mdev, drbd_state_t mask,
+static inline int drbd_request_state(drbd_dev *mdev, drbd_state_t mask,
drbd_state_t val)
{
return _drbd_request_state(mdev, mask, val, ChgStateVerbose);
@@ -1467,9 +1467,9 @@
* drbd_chk_io_error: Handles the on_io_error setting, should be called from
* all io completion handlers. See also drbd_io_error().
*/
-static inline void __drbd_chk_io_error(drbd_dev* mdev, int forcedetach)
+static inline void __drbd_chk_io_error(drbd_dev *mdev, int forcedetach)
{
- switch(mdev->bc->dc.on_io_error) {
+ switch (mdev->bc->dc.on_io_error) {
case PassOn: /* FIXME would this be better named "Ignore"? */
if (!forcedetach) {
if (printk_ratelimit())
@@ -1488,7 +1488,7 @@
}
}
-static inline void drbd_chk_io_error(drbd_dev* mdev, int error, int forcedetach)
+static inline void drbd_chk_io_error(drbd_dev *mdev, int error, int forcedetach)
{
if (error) {
unsigned long flags;
@@ -1673,7 +1673,7 @@
* _req_mod(req, connection_lost_while_pending)
* [from tl_clear_barrier]
*/
-static inline void inc_ap_pending(drbd_dev* mdev)
+static inline void inc_ap_pending(drbd_dev *mdev)
{
atomic_inc(&mdev->ap_pending_cnt);
}
@@ -1685,7 +1685,7 @@
atomic_read(&mdev->which))
#define dec_ap_pending(mdev) do { \
- typecheck(drbd_dev*, mdev); \
+ typecheck(drbd_dev *, mdev); \
if (atomic_dec_and_test(&mdev->ap_pending_cnt)) \
wake_up(&mdev->misc_wait); \
ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt); } while (0)
@@ -1696,13 +1696,13 @@
* SyncSource sends RSDataReply (and expects WriteAck whith ID_SYNCER)
* (or NegAck with ID_SYNCER)
*/
-static inline void inc_rs_pending(drbd_dev* mdev)
+static inline void inc_rs_pending(drbd_dev *mdev)
{
atomic_inc(&mdev->rs_pending_cnt);
}
#define dec_rs_pending(mdev) do { \
- typecheck(drbd_dev*, mdev); \
+ typecheck(drbd_dev *, mdev); \
atomic_dec(&mdev->rs_pending_cnt); \
ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt); } while (0)
@@ -1715,23 +1715,23 @@
* receive_DataRequest (receive_RSDataRequest) we need to send back Data
* receive_Barrier_* we need to send a BarrierAck
*/
-static inline void inc_unacked(drbd_dev* mdev)
+static inline void inc_unacked(drbd_dev *mdev)
{
atomic_inc(&mdev->unacked_cnt);
}
#define dec_unacked(mdev) do { \
- typecheck(drbd_dev*, mdev); \
+ typecheck(drbd_dev *, mdev); \
atomic_dec(&mdev->unacked_cnt); \
ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)
#define sub_unacked(mdev, n) do { \
- typecheck(drbd_dev*, mdev); \
+ typecheck(drbd_dev *, mdev); \
atomic_sub(n, &mdev->unacked_cnt); \
ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)
-static inline void dec_net(drbd_dev* mdev)
+static inline void dec_net(drbd_dev *mdev)
{
if (atomic_dec_and_test(&mdev->net_cnt))
wake_up(&mdev->misc_wait);
@@ -1741,7 +1741,7 @@
* inc_net: Returns TRUE when it is ok to access mdev->net_conf. You
* should call dec_net() when finished looking at mdev->net_conf.
*/
-static inline int inc_net(drbd_dev* mdev)
+static inline int inc_net(drbd_dev *mdev)
{
int have_net_conf;
@@ -1757,7 +1757,7 @@
* this is mood...
*/
-static inline void dec_local(drbd_dev* mdev)
+static inline void dec_local(drbd_dev *mdev)
{
if (atomic_dec_and_test(&mdev->local_cnt))
wake_up(&mdev->misc_wait);
@@ -1767,7 +1767,7 @@
* inc_local: Returns TRUE when local IO is possible. If it returns
* TRUE you should call dec_local() after IO is completed.
*/
-static inline int inc_local_if_state(drbd_dev* mdev, drbd_disks_t mins)
+static inline int inc_local_if_state(drbd_dev *mdev, drbd_disks_t mins)
{
int io_allowed;
@@ -1777,7 +1777,7 @@
dec_local(mdev);
return io_allowed;
}
-static inline int inc_local(drbd_dev* mdev)
+static inline int inc_local(drbd_dev *mdev)
{
return inc_local_if_state(mdev, Inconsistent);
}
@@ -1785,7 +1785,7 @@
/* this throttles on-the-fly application requests
* according to max_buffers settings;
* maybe re-implement using semaphores? */
-static inline int drbd_get_max_buffers(drbd_dev* mdev)
+static inline int drbd_get_max_buffers(drbd_dev *mdev)
{
int mxb = 1000000; /* arbitrary limit on open requests */
if (inc_net(mdev)) {
@@ -1795,7 +1795,7 @@
return mxb;
}
-static inline int __inc_ap_bio_cond(drbd_dev* mdev) {
+static inline int __inc_ap_bio_cond(drbd_dev *mdev) {
int mxb = drbd_get_max_buffers(mdev);
if (mdev->state.susp) return 0;
if (mdev->state.conn == WFBitMapS) return 0;
@@ -1810,7 +1810,7 @@
/* I'd like to use wait_event_lock_irq,
* but I'm not sure when it got introduced,
* and not sure when it has 3 or 4 arguments */
-static inline void inc_ap_bio(drbd_dev* mdev)
+static inline void inc_ap_bio(drbd_dev *mdev)
{
/* compare with after_state_ch,
* os.conn != WFBitMapS && ns.conn == WFBitMapS */
@@ -1835,7 +1835,7 @@
spin_unlock_irq(&mdev->req_lock);
}
-static inline void dec_ap_bio(drbd_dev* mdev)
+static inline void dec_ap_bio(drbd_dev *mdev)
{
int mxb = drbd_get_max_buffers(mdev);
int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt);
@@ -1860,7 +1860,7 @@
/* CAUTION: please no side effects in arguments! */
#define seq_max(a, b) ((u32)(seq_gt((a), (b)) ? (a) : (b)))
-static inline void update_peer_seq(drbd_dev* mdev, unsigned int new_seq)
+static inline void update_peer_seq(drbd_dev *mdev, unsigned int new_seq)
{
unsigned int m;
spin_lock(&mdev->peer_seq_lock);
@@ -1870,7 +1870,7 @@
if (m == new_seq) wake_up(&mdev->seq_wait);
}
-static inline int drbd_queue_order_type(drbd_dev* mdev)
+static inline int drbd_queue_order_type(drbd_dev *mdev)
{
/* sorry, we currently have no working implementation
* of distributed TCQ stuff */
Modified: branches/drbd-8.0-for-linus/drbd/drbd_main.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_main.c 2007-07-24 13:20:54 UTC (rev 2992)
+++ branches/drbd-8.0-for-linus/drbd/drbd_main.c 2007-07-24 13:25:49 UTC (rev 2993)
@@ -136,7 +136,7 @@
1) I want to hand out the preallocated objects first.
2) I want to be able to interrupt sleeping allocation with a signal.
Note: This is a single linked list, the next pointer is the private
- member of struct page.
+ member of struct page.
*/
struct page *drbd_pp_pool;
spinlock_t drbd_pp_lock;
@@ -310,7 +310,7 @@
* anyways. This is to make sure that you get a resynchronisation of
* the full device the next time you connect.
*/
-int drbd_io_error(drbd_dev* mdev, int forcedetach)
+int drbd_io_error(drbd_dev *mdev, int forcedetach)
{
enum io_error_handler eh;
unsigned long flags;
@@ -358,7 +358,7 @@
* Returns TRUE if this state change should be preformed as a cluster wide
* transaction. Of course it returns 0 as soon as the connection is lost.
*/
-STATIC int cl_wide_st_chg(drbd_dev* mdev, drbd_state_t os, drbd_state_t ns)
+STATIC int cl_wide_st_chg(drbd_dev *mdev, drbd_state_t os, drbd_state_t ns)
{
return ( os.conn >= Connected && ns.conn >= Connected &&
( ( os.role != Primary && ns.role == Primary ) ||
@@ -368,7 +368,7 @@
(os.conn >= Connected && ns.conn == Disconnecting);
}
-int drbd_change_state(drbd_dev* mdev, enum chg_state_flags f,
+int drbd_change_state(drbd_dev *mdev, enum chg_state_flags f,
drbd_state_t mask, drbd_state_t val)
{
unsigned long flags;
@@ -386,16 +386,16 @@
return rv;
}
-void drbd_force_state(drbd_dev* mdev, drbd_state_t mask, drbd_state_t val)
+void drbd_force_state(drbd_dev *mdev, drbd_state_t mask, drbd_state_t val)
{
drbd_change_state(mdev, ChgStateHard, mask, val);
}
-STATIC int is_valid_state(drbd_dev* mdev, drbd_state_t ns);
-STATIC int is_valid_state_transition(drbd_dev*, drbd_state_t, drbd_state_t);
+STATIC int is_valid_state(drbd_dev *mdev, drbd_state_t ns);
+STATIC int is_valid_state_transition(drbd_dev *, drbd_state_t, drbd_state_t);
STATIC int drbd_send_state_req(drbd_dev *, drbd_state_t, drbd_state_t);
-set_st_err_t _req_st_cond(drbd_dev* mdev, drbd_state_t mask, drbd_state_t val)
+set_st_err_t _req_st_cond(drbd_dev *mdev, drbd_state_t mask, drbd_state_t val)
{
drbd_state_t os, ns;
unsigned long flags;
@@ -430,7 +430,7 @@
* transition this function even does a cluster wide transaction.
* It has a cousin named drbd_request_state(), which is always verbose.
*/
-int _drbd_request_state(drbd_dev* mdev, drbd_state_t mask, drbd_state_t val,
+int _drbd_request_state(drbd_dev *mdev, drbd_state_t mask, drbd_state_t val,
enum chg_state_flags f)
{
unsigned long flags;
@@ -483,7 +483,7 @@
}
-STATIC void print_st(drbd_dev* mdev, char *name, drbd_state_t ns)
+STATIC void print_st(drbd_dev *mdev, char *name, drbd_state_t ns)
{
ERR(" %s = { cs:%s st:%s/%s ds:%s/%s %c%c%c%c }\n",
name,
@@ -499,7 +499,7 @@
);
}
-void print_st_err(drbd_dev* mdev, drbd_state_t os, drbd_state_t ns, int err)
+void print_st_err(drbd_dev *mdev, drbd_state_t os, drbd_state_t ns, int err)
{
ERR("State change failed: %s\n", set_st_err_name(err));
print_st(mdev, " state", os);
@@ -518,11 +518,11 @@
#define PSC(A) \
({ if (ns.A != os.A) { \
pbp += sprintf(pbp, #A "( %s -> %s ) ", \
- A##s_to_name(os.A), \
- A##s_to_name(ns.A)); \
+ A##s_to_name(os.A), \
+ A##s_to_name(ns.A)); \
} })
-STATIC int is_valid_state(drbd_dev* mdev, drbd_state_t ns)
+STATIC int is_valid_state(drbd_dev *mdev, drbd_state_t ns)
{
/* See drbd_state_sw_errors in drbd_strings.c */
@@ -573,7 +573,7 @@
return rv;
}
-STATIC int is_valid_state_transition(drbd_dev* mdev, drbd_state_t ns, drbd_state_t os)
+STATIC int is_valid_state_transition(drbd_dev *mdev, drbd_state_t ns, drbd_state_t os)
{
int rv = SS_Success;
@@ -589,7 +589,7 @@
return rv;
}
-int _drbd_set_state(drbd_dev* mdev, drbd_state_t ns, enum chg_state_flags flags)
+int _drbd_set_state(drbd_dev *mdev, drbd_state_t ns, enum chg_state_flags flags)
{
drbd_state_t os;
int rv = SS_Success, warn_sync_abort = 0;
@@ -641,7 +641,7 @@
if ( ns.conn >= Connected &&
( ns.disk == Consistent || ns.disk == Outdated ) ) {
- switch(ns.conn) {
+ switch (ns.conn) {
case WFBitMapT:
case PausedSyncT:
ns.disk = Outdated;
@@ -663,7 +663,7 @@
if ( ns.conn >= Connected &&
( ns.pdsk == Consistent || ns.pdsk == Outdated ) ) {
- switch(ns.conn) {
+ switch (ns.conn) {
case Connected:
case WFBitMapT:
case PausedSyncT:
@@ -816,7 +816,7 @@
return 1;
}
-void after_state_ch(drbd_dev* mdev, drbd_state_t os, drbd_state_t ns,
+void after_state_ch(drbd_dev *mdev, drbd_state_t os, drbd_state_t ns,
enum chg_state_flags flags)
{
enum fencing_policy fp;
@@ -1050,7 +1050,7 @@
spin_unlock(&thi->t_lock);
complete(&thi->startstop); /* notify: thi->task is set. */
- while(1) {
+ while (1) {
retval = thi->function(thi);
if (get_t_state(thi) != Restarting) break;
thi->t_state = Running;
@@ -1088,8 +1088,8 @@
/* INFO("drbd_thread_start: %s [%d]: %s %d -> Running\n",
current->comm, current->pid,
thi == &mdev->receiver ? "receiver" :
- thi == &mdev->asender ? "asender" :
- thi == &mdev->worker ? "worker" : "NONSENSE",
+ thi == &mdev->asender ? "asender" :
+ thi == &mdev->worker ? "worker" : "NONSENSE",
thi->t_state); */
if (thi->t_state == None) {
@@ -1199,7 +1199,7 @@
* when we hold the appropriate socket mutex.
*/
int drbd_send_cmd(drbd_dev *mdev, int use_data_socket,
- Drbd_Packet_Cmd cmd, Drbd_Header* h, size_t size)
+ Drbd_Packet_Cmd cmd, Drbd_Header *h, size_t size)
{
int ok = 0;
struct socket *sock;
@@ -1253,7 +1253,7 @@
p.rate = cpu_to_be32(sc->rate);
- return drbd_send_cmd(mdev, USE_DATA_SOCKET, SyncParam, (Drbd_Header*)&p, sizeof(p));
+ return drbd_send_cmd(mdev, USE_DATA_SOCKET, SyncParam, (Drbd_Header *)&p, sizeof(p));
}
int drbd_send_protocol(drbd_dev *mdev)
@@ -1268,7 +1268,7 @@
p.two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
return drbd_send_cmd(mdev, USE_DATA_SOCKET, ReportProtocol,
- (Drbd_Header*)&p, sizeof(p));
+ (Drbd_Header *)&p, sizeof(p));
}
int drbd_send_uuids(drbd_dev *mdev)
@@ -1292,7 +1292,7 @@
dec_local(mdev);
return drbd_send_cmd(mdev, USE_DATA_SOCKET, ReportUUIDs,
- (Drbd_Header*)&p, sizeof(p));
+ (Drbd_Header *)&p, sizeof(p));
}
int drbd_send_sync_uuid(drbd_dev *mdev, u64 val)
@@ -1302,7 +1302,7 @@
p.uuid = cpu_to_be64(val);
return drbd_send_cmd(mdev, USE_DATA_SOCKET, ReportSyncUUID,
- (Drbd_Header*)&p, sizeof(p));
+ (Drbd_Header *)&p, sizeof(p));
}
int drbd_send_sizes(drbd_dev *mdev)
@@ -1332,7 +1332,7 @@
p.queue_order_type = cpu_to_be32(q_order_type);
ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, ReportSizes,
- (Drbd_Header*)&p, sizeof(p));
+ (Drbd_Header *)&p, sizeof(p));
return ok;
}
@@ -1343,7 +1343,7 @@
p.state = cpu_to_be32(mdev->state.i);
return drbd_send_cmd(mdev, USE_DATA_SOCKET, ReportState,
- (Drbd_Header*)&p, sizeof(p));
+ (Drbd_Header *)&p, sizeof(p));
}
STATIC int drbd_send_state_req(drbd_dev *mdev, drbd_state_t mask, drbd_state_t val)
@@ -1354,7 +1354,7 @@
p.val = cpu_to_be32(val.i);
return drbd_send_cmd(mdev, USE_DATA_SOCKET, StateChgRequest,
- (Drbd_Header*)&p, sizeof(p));
+ (Drbd_Header *)&p, sizeof(p));
}
int drbd_send_sr_reply(drbd_dev *mdev, int retcode)
@@ -1364,7 +1364,7 @@
p.retcode = cpu_to_be32(retcode);
return drbd_send_cmd(mdev, USE_META_SOCKET, StateChgReply,
- (Drbd_Header*)&p, sizeof(p));
+ (Drbd_Header *)&p, sizeof(p));
}
@@ -1434,7 +1434,7 @@
p.barrier = barrier_nr;
p.set_size = cpu_to_be32(set_size);
- ok = drbd_send_cmd(mdev, USE_META_SOCKET, BarrierAck, (Drbd_Header*)&p, sizeof(p));
+ ok = drbd_send_cmd(mdev, USE_META_SOCKET, BarrierAck, (Drbd_Header *)&p, sizeof(p));
return ok;
}
@@ -1457,7 +1457,7 @@
p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
if (!mdev->meta.socket || mdev->state.conn < Connected) return FALSE;
- ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd, (Drbd_Header*)&p, sizeof(p));
+ ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd, (Drbd_Header *)&p, sizeof(p));
return ok;
}
@@ -1465,7 +1465,7 @@
Drbd_Data_Packet *dp)
{
const int header_size = sizeof(Drbd_Data_Packet) - sizeof(Drbd_Header);
- int data_size = ((Drbd_Header*)dp)->length - header_size;
+ int data_size = ((Drbd_Header *)dp)->length - header_size;
return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
dp->block_id);
@@ -1497,7 +1497,7 @@
/* FIXME BIO_RW_SYNC ? */
- ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, (Drbd_Header*)&p, sizeof(p));
+ ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, (Drbd_Header *)&p, sizeof(p));
return ok;
}
@@ -1550,7 +1550,7 @@
As a workaround, we disable sendpage on pages with page_count == 0 or PageSlab.
*/
int _drbd_no_send_page(drbd_dev *mdev, struct page *page,
- int offset, size_t size)
+ int offset, size_t size)
{
int ret;
ret = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, 0);
@@ -1620,7 +1620,7 @@
len -= sent;
offset += sent;
/* FIXME test "last_received" ... */
- } while(len > 0 /* THINK && mdev->cstate >= Connected*/);
+ } while (len > 0 /* THINK && mdev->cstate >= Connected*/);
set_fs(oldfs);
out:
@@ -1813,7 +1813,7 @@
sent += rv;
iov.iov_base += rv;
iov.iov_len -= rv;
- } while(sent < size);
+ } while (sent < size);
#if !HAVE_KERNEL_SENDMSG
set_fs(oldfs);
@@ -2078,7 +2078,7 @@
{
struct page *page;
- while(drbd_pp_pool) {
+ while (drbd_pp_pool) {
page = drbd_pp_pool;
drbd_pp_pool = (struct page *)page_private(page);
__free_page(page);
@@ -2139,7 +2139,7 @@
/* drbd's page pool */
spin_lock_init(&drbd_pp_lock);
- for (i = 0;i < number;i++) {
+ for (i = 0; i < number; i++) {
page = alloc_page(GFP_HIGHUSER);
if (!page) goto Enomem;
set_page_private(page, (unsigned long)drbd_pp_pool);
@@ -2744,7 +2744,7 @@
STATIC void md_sync_timer_fn(unsigned long data)
{
- drbd_dev* mdev = (drbd_dev*) data;
+ drbd_dev *mdev = (drbd_dev *) data;
drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
}
@@ -2845,30 +2845,30 @@
/*
-
-drbd_print_buffer
-
-This routine dumps binary data to the debugging output. Can be
-called at interrupt level.
-
-Arguments:
-
- prefix - String is output at the beginning of each line output
- flags - Control operation of the routine. Currently defined
- Flags are:
- DBGPRINT_BUFFADDR; if set, each line starts with the
- virtual address of the line being outupt. If clear,
- each line starts with the offset from the beginning
- of the buffer.
- size - Indicates the size of each entry in the buffer. Supported
- values are sizeof(char), sizeof(short) and sizeof(int)
- buffer - Start address of buffer
- buffer_va - Virtual address of start of buffer (normally the same
- as Buffer, but having it separate allows it to hold
- file address for example)
- length - length of buffer
-
-*/
+ *
+ * drbd_print_buffer
+ *
+ * This routine dumps binary data to the debugging output. Can be
+ * called at interrupt level.
+ *
+ * Arguments:
+ *
+ * prefix - String is output at the beginning of each line output
+ * flags - Control operation of the routine. Currently defined
+ * Flags are:
+ * DBGPRINT_BUFFADDR; if set, each line starts with the
+ * 		   virtual address of the line being output. If clear,
+ * each line starts with the offset from the beginning
+ * of the buffer.
+ * size - Indicates the size of each entry in the buffer. Supported
+ * values are sizeof(char), sizeof(short) and sizeof(int)
+ * buffer - Start address of buffer
+ * buffer_va - Virtual address of start of buffer (normally the same
+ * as Buffer, but having it separate allows it to hold
+ * file address for example)
+ * length - length of buffer
+ *
+ */
void
drbd_print_buffer(const char *prefix, unsigned int flags, int size,
const void *buffer, const void *buffer_va,
Modified: branches/drbd-8.0-for-linus/drbd/drbd_nl.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_nl.c 2007-07-24 13:20:54 UTC (rev 2992)
+++ branches/drbd-8.0-for-linus/drbd/drbd_nl.c 2007-07-24 13:25:49 UTC (rev 2993)
@@ -52,9 +52,9 @@
int tag; \
int dlen; \
\
- while( (tag = *tags++) != TT_END ) { \
+ while ( (tag = *tags++) != TT_END ) { \
dlen = *tags++; \
- switch( tag_number(tag) ) { \
+ switch ( tag_number(tag) ) { \
fields \
default: \
if (tag & T_MANDATORY) { \
@@ -157,7 +157,7 @@
"Netlink: >> %s (%d) - seq: %x, ack: %x, len: %x\n",
nlp->minor,
nlp->packet_type == P_nl_after_last_packet?
- "Empty-Reply" : nl_packet_name(nlp->packet_type),
+ "Empty-Reply" : nl_packet_name(nlp->packet_type),
nlp->packet_type,
req->seq, req->ack, req->len);
}
@@ -199,7 +199,7 @@
r = drbd_khelper(mdev, "outdate-peer");
- switch( (r>>8) & 0xff ) {
+ switch ( (r>>8) & 0xff ) {
case 3: /* peer is inconsistent */
nps = Inconsistent;
break;
@@ -268,7 +268,7 @@
val.pdsk = nps;
mask.pdsk = disk_mask;
-
+
continue;
}
@@ -306,7 +306,7 @@
/* Wait until nothing is on the fly :) */
if ( wait_event_interruptible( mdev->misc_wait,
- atomic_read(&mdev->ap_pending_cnt) == 0 ) ) {
+ atomic_read(&mdev->ap_pending_cnt) == 0 ) ) {
r = GotSignal;
goto fail;
}
@@ -390,7 +390,7 @@
struct drbd_backing_dev *bdev)
{
sector_t md_size_sect = 0;
- switch(bdev->dc.meta_dev_idx) {
+ switch (bdev->dc.meta_dev_idx) {
default:
/* v07 style fixed size indexed meta data */
bdev->md.md_size_sect = MD_RESERVED_SECT;
@@ -410,7 +410,7 @@
bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
/* al size is still fixed */
bdev->md.al_offset = -MD_AL_MAX_SIZE;
- /* LGE FIXME max size check missing. */
+ /* LGE FIXME max size check missing. */
/* we need (slightly less than) ~ this much bitmap sectors: */
md_size_sect = drbd_get_capacity(bdev->backing_bdev);
md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
@@ -495,7 +495,7 @@
/* LGE: flexible device size!! is this the right thing to test? */
md_moved = prev_first_sect != drbd_md_first_sector(mdev->bc)
- || prev_size != mdev->bc->md.md_size_sect;
+ || prev_size != mdev->bc->md.md_size_sect;
if (md_moved) {
WARN("Moving meta-data.\n");
@@ -630,7 +630,7 @@
DUMPI(b->seg_boundary_mask);
);
- q->max_sectors = max_seg_s >> 9;
+ q->max_sectors = max_seg_s >> 9;
q->max_phys_segments = max_seg_s >> PAGE_SHIFT;
q->max_hw_segments = max_seg_s >> PAGE_SHIFT;
q->max_segment_size = max_seg_s;
@@ -685,11 +685,11 @@
goto fail;
}
- /*
- * We may have gotten here very quickly from a detach. Wait for a bit
- * then fail.
- */
- while(mdev->bc != NULL) {
+ /*
+ * We may have gotten here very quickly from a detach. Wait for a bit
+ * then fail.
+ */
+ while (mdev->bc != NULL) {
if (ntries++ >= 5) {
WARN("drbd_nl_disk_conf: mdev->bc not NULL.\n");
retcode = HaveDiskConfig;
@@ -763,7 +763,7 @@
nbc->backing_bdev = inode->i_bdev;
if (bd_claim(nbc->backing_bdev, mdev)) {
printk(KERN_ERR "drbd: bd_claim(%p,%p); failed [%p;%p;%u]\n",
- nbc->backing_bdev, mdev,
+ nbc->backing_bdev, mdev,
nbc->backing_bdev->bd_holder,
nbc->backing_bdev->bd_contains->bd_holder,
nbc->backing_bdev->bd_holders);
@@ -955,8 +955,8 @@
drbd_bm_unlock(mdev);
if (inc_local_if_state(mdev, Attaching)) {
- if (mdev->state.role == Primary) mdev->bc->md.uuid[Current] |= (u64)1;
- else mdev->bc->md.uuid[Current] &= ~(u64)1;
+ if (mdev->state.role == Primary) mdev->bc->md.uuid[Current] |= (u64)1;
+ else mdev->bc->md.uuid[Current] &= ~(u64)1;
dec_local(mdev);
}
@@ -1025,22 +1025,22 @@
dec_local(mdev);
} else {
memset(new_conf, 0, sizeof(struct net_conf));
- new_conf->timeout = DRBD_TIMEOUT_DEF;
+ new_conf->timeout = DRBD_TIMEOUT_DEF;
new_conf->try_connect_int = DRBD_CONNECT_INT_DEF;
- new_conf->ping_int = DRBD_PING_INT_DEF;
+ new_conf->ping_int = DRBD_PING_INT_DEF;
new_conf->max_epoch_size = DRBD_MAX_EPOCH_SIZE_DEF;
- new_conf->max_buffers = DRBD_MAX_BUFFERS_DEF;
+ new_conf->max_buffers = DRBD_MAX_BUFFERS_DEF;
new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
- new_conf->sndbuf_size = DRBD_SNDBUF_SIZE_DEF;
- new_conf->ko_count = DRBD_KO_COUNT_DEF;
- new_conf->after_sb_0p = DRBD_AFTER_SB_0P_DEF;
- new_conf->after_sb_1p = DRBD_AFTER_SB_1P_DEF;
- new_conf->after_sb_2p = DRBD_AFTER_SB_2P_DEF;
- new_conf->want_lose = 0;
+ new_conf->sndbuf_size = DRBD_SNDBUF_SIZE_DEF;
+ new_conf->ko_count = DRBD_KO_COUNT_DEF;
+ new_conf->after_sb_0p = DRBD_AFTER_SB_0P_DEF;
+ new_conf->after_sb_1p = DRBD_AFTER_SB_1P_DEF;
+ new_conf->after_sb_2p = DRBD_AFTER_SB_2P_DEF;
+ new_conf->want_lose = 0;
new_conf->two_primaries = 0;
new_conf->wire_protocol = DRBD_PROT_C;
- new_conf->ping_timeo = DRBD_PING_TIMEO_DEF;
- new_conf->rr_conflict = DRBD_RR_CONFLICT_DEF;
+ new_conf->ping_timeo = DRBD_PING_TIMEO_DEF;
+ new_conf->rr_conflict = DRBD_RR_CONFLICT_DEF;
}
if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
@@ -1063,7 +1063,7 @@
#define O_ADDR(A) (((struct sockaddr_in *)&A->peer_addr)->sin_addr.s_addr)
#define O_PORT(A) (((struct sockaddr_in *)&A->peer_addr)->sin_port)
retcode = NoError;
- for(i = 0;i < minor_count;i++) {
+ for (i = 0; i < minor_count; i++) {
odev = minor_to_mdev(i);
if (!odev || odev == mdev) continue;
if ( inc_net(odev)) {
@@ -1127,10 +1127,10 @@
*
* XXX maybe rather store the value scaled to jiffies?
* Note: MAX_SCHEDULE_TIMEOUT/HZ*HZ != MAX_SCHEDULE_TIMEOUT
- * and HZ > 10; which is unlikely to change...
- * Thus, if interrupted by a signal,
- * sock_{send,recv}msg returns -EINTR,
- * if the timeout expires, -EAGAIN.
+ * and HZ > 10; which is unlikely to change...
+ * Thus, if interrupted by a signal,
+ * sock_{send,recv}msg returns -EINTR,
+ * if the timeout expires, -EAGAIN.
*/
/* unlikely: someone disabled the timeouts ...
* just put some huge values in there. */
@@ -1293,7 +1293,7 @@
goto fail;
}
odev = minor_to_mdev(sc.after); /* check against loops in */
- while(1) {
+ while (1) {
if (odev == mdev) {
retcode = SyncAfterCycle;
goto fail;
Modified: branches/drbd-8.0-for-linus/drbd/drbd_receiver.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_receiver.c 2007-07-24 13:20:54 UTC (rev 2992)
+++ branches/drbd-8.0-for-linus/drbd/drbd_receiver.c 2007-07-24 13:25:49 UTC (rev 2993)
@@ -88,7 +88,7 @@
mdev_to_minor(mdev), t);
break;
}
- } while(le != list);
+ } while (le != list);
le = list;
do {
@@ -106,7 +106,7 @@
mdev_to_minor(mdev), t);
break;
}
- } while(le != list);
+ } while (le != list);
if (forward != backward) {
printk(KERN_ERR DEVICE_NAME "%d: forward=%d, backward=%d\n",
@@ -261,7 +261,7 @@
bio->bi_sector = sector;
ds = data_size;
- while(ds) {
+ while (ds) {
page = drbd_pp_alloc(mdev, gfp_mask);
if (!page) {
ERR("alloc_ee: Allocation of a page failed\n");
@@ -354,7 +354,7 @@
struct list_head *le;
spin_lock_irq(&mdev->req_lock);
- while(!list_empty(list)) {
+ while (!list_empty(list)) {
le = list->next;
list_del(le);
e = list_entry(le, struct Tl_epoch_entry, w.list);
@@ -440,7 +440,7 @@
reclaim_net_ee(mdev);
- while(!list_empty(&mdev->done_ee)) {
+ while (!list_empty(&mdev->done_ee)) {
le = mdev->done_ee.next;
list_del(le);
e = list_entry(le, struct Tl_epoch_entry, w.list);
@@ -555,13 +555,13 @@
oldfs = get_fs();
set_fs(KERNEL_DS);
- for(;;) {
+ for (;;) {
rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
if (rv == size) break;
/* Note:
- * ECONNRESET other side closed the connection
- * ERESTARTSYS (on sock) we got a signal
+ * ECONNRESET other side closed the connection
+ * ERESTARTSYS (on sock) we got a signal
*/
if (rv < 0) {
@@ -573,7 +573,7 @@
} else if (rv == 0) {
INFO("sock was shut down by peer\n");
break;
- } else {
+ } else {
/* signal came in, or peer/link went down,
* after we read a partial message
*/
@@ -607,11 +607,11 @@
sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
/* explicitly bind to the configured IP as source IP
- for the outgoing connections.
- This is needed for multihomed hosts and to be
- able to use lo: interfaces for drbd.
- Make sure to use 0 as portnumber, so linux selects
- a free one dynamically.
+ * for the outgoing connections.
+ * This is needed for multihomed hosts and to be
+ * able to use lo: interfaces for drbd.
+ * Make sure to use 0 as portnumber, so linux selects
+ * a free one dynamically.
*/
memcpy (&src_in, &(mdev->net_conf->my_addr), sizeof(struct sockaddr_in));
src_in.sin_port = 0;
@@ -720,7 +720,7 @@
msock = NULL;
do {
- for(try = 0;;) { /* 3 tries, this should take less than a second! */
+ for (try = 0;;) { /* 3 tries, this should take less than a second! */
s = drbd_try_connect(mdev);
if (s || ++try >= 3) break;
/* give the other side time to call bind() & listen() */
@@ -753,7 +753,7 @@
s = drbd_wait_for_connect(mdev);
if (s) {
- switch(drbd_recv_fp(mdev, s)) {
+ switch (drbd_recv_fp(mdev, s)) {
case HandShakeS:
if (sock) sock_release(sock);
sock = s;
@@ -780,7 +780,7 @@
}
}
- } while( !sock || !msock );
+ } while ( !sock || !msock );
msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
sock->sk->sk_reuse = 1; /* SO_REUSEADDR */
@@ -863,11 +863,11 @@
return TRUE;
}
-STATIC int receive_Barrier_no_tcq(drbd_dev *mdev, Drbd_Header* h)
+STATIC int receive_Barrier_no_tcq(drbd_dev *mdev, Drbd_Header *h)
{
int rv;
int epoch_size;
- Drbd_Barrier_Packet *p = (Drbd_Barrier_Packet*)h;
+ Drbd_Barrier_Packet *p = (Drbd_Barrier_Packet *)h;
ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
@@ -948,7 +948,7 @@
page = drbd_pp_alloc(mdev, GFP_KERNEL);
data = kmap(page);
- while(data_size) {
+ while (data_size) {
rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
if ( rr != min_t(int, data_size, PAGE_SIZE) ) {
rv = 0;
@@ -1060,13 +1060,13 @@
return TRUE;
}
-STATIC int receive_DataReply(drbd_dev *mdev, Drbd_Header* h)
+STATIC int receive_DataReply(drbd_dev *mdev, Drbd_Header *h)
{
drbd_request_t *req;
sector_t sector;
unsigned int header_size, data_size;
int ok;
- Drbd_Data_Packet *p = (Drbd_Data_Packet*)h;
+ Drbd_Data_Packet *p = (Drbd_Data_Packet *)h;
header_size = sizeof(*p) - sizeof(*h);
data_size = h->length - header_size;
@@ -1104,12 +1104,12 @@
return ok;
}
-STATIC int receive_RSDataReply(drbd_dev *mdev, Drbd_Header* h)
+STATIC int receive_RSDataReply(drbd_dev *mdev, Drbd_Header *h)
{
sector_t sector;
unsigned int header_size, data_size;
int ok;
- Drbd_Data_Packet *p = (Drbd_Data_Packet*)h;
+ Drbd_Data_Packet *p = (Drbd_Data_Packet *)h;
header_size = sizeof(*p) - sizeof(*h);
data_size = h->length - header_size;
@@ -1269,11 +1269,11 @@
}
/* mirrored write */
-STATIC int receive_Data(drbd_dev *mdev, Drbd_Header* h)
+STATIC int receive_Data(drbd_dev *mdev, Drbd_Header *h)
{
sector_t sector;
struct Tl_epoch_entry *e;
- Drbd_Data_Packet *p = (Drbd_Data_Packet*)h;
+ Drbd_Data_Packet *p = (Drbd_Data_Packet *)h;
int header_size, data_size;
unsigned int barrier_nr = 0;
unsigned int epoch_size = 0;
@@ -1365,18 +1365,18 @@
*
* if any conflicting request is found that has not yet been acked,
* AND I have the "discard concurrent writes" flag:
- * queue (via done_ee) the DiscardAck; OUT.
+ * queue (via done_ee) the DiscardAck; OUT.
*
* if any conflicting request is found:
- * block the receiver, waiting on misc_wait
- * until no more conflicting requests are there,
- * or we get interrupted (disconnect).
+ * block the receiver, waiting on misc_wait
+ * until no more conflicting requests are there,
+ * or we get interrupted (disconnect).
*
- * we do not just write after local io completion of those
- * requests, but only after req is done completely, i.e.
- * we wait for the DiscardAck to arrive!
+ * we do not just write after local io completion of those
+ * requests, but only after req is done completely, i.e.
+ * we wait for the DiscardAck to arrive!
*
- * then proceed normally, i.e. submit.
+ * then proceed normally, i.e. submit.
*/
if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
goto out_interrupted;
@@ -1388,7 +1388,7 @@
#define OVERLAPS overlaps(i->sector, i->size, sector, size)
slot = tl_hash_slot(mdev, sector);
first = 1;
- for(;;) {
+ for (;;) {
int have_unacked = 0;
int have_conflict = 0;
prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
@@ -1522,7 +1522,7 @@
(void)drbd_send_b_ack(mdev, cpu_to_be32(barrier_nr), epoch_size);
}
- switch(mdev->net_conf->wire_protocol) {
+ switch (mdev->net_conf->wire_protocol) {
case DRBD_PROT_C:
inc_unacked(mdev);
/* corresponding dec_unacked() in e_end_block()
@@ -1572,7 +1572,7 @@
struct Tl_epoch_entry *e;
int size;
unsigned int fault_type;
- Drbd_BlockRequest_Packet *p = (Drbd_BlockRequest_Packet*)h;
+ Drbd_BlockRequest_Packet *p = (Drbd_BlockRequest_Packet *)h;
ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
@@ -1691,7 +1691,7 @@
}
if (mdev->net_conf->after_sb_0p == DiscardZeroChg) break;
case DiscardLeastChg:
- if ( ch_self < ch_peer ) rv = -1;
+ if ( ch_self < ch_peer ) rv = -1;
else if (ch_self > ch_peer) rv = 1;
else /* ( ch_self == ch_peer ) */ {
/* Well, then use something else. */
@@ -1800,14 +1800,14 @@
}
/*
- 100 after split brain try auto recover
- 2 SyncSource set BitMap
- 1 SyncSource use BitMap
- 0 no Sync
- -1 SyncTarget use BitMap
- -2 SyncTarget set BitMap
- -100 after split brain, disconnect
--1000 unrelated data
+ 100 after split brain try auto recover
+ 2 SyncSource set BitMap
+ 1 SyncSource use BitMap
+ 0 no Sync
+ -1 SyncTarget use BitMap
+ -2 SyncTarget set BitMap
+ -100 after split brain, disconnect
+-1000 unrelated data
*/
STATIC int drbd_uuid_compare(drbd_dev *mdev, int *rule_nr)
{
@@ -1840,7 +1840,7 @@
MTRACE(TraceTypeUuid, TraceLvlMetrics, DUMPI(rct); );
- switch(rct) {
+ switch (rct) {
case 0: /* !self_pri && !peer_pri */ return 0;
case 1: /* self_pri && !peer_pri */ return 1;
case 2: /* !self_pri && peer_pri */ return -1;
@@ -1981,7 +1981,7 @@
if (hg < 0 && /* by intention we do not use mydisk here. */
mdev->state.role == Primary && mdev->state.disk >= Consistent ) {
- switch(mdev->net_conf->rr_conflict) {
+ switch (mdev->net_conf->rr_conflict) {
case CallHelper:
drbd_khelper(mdev, "pri-lost");
/* fall through */
@@ -2045,7 +2045,7 @@
STATIC int receive_protocol(drbd_dev *mdev, Drbd_Header *h)
{
- Drbd_Protocol_Packet *p = (Drbd_Protocol_Packet*)h;
+ Drbd_Protocol_Packet *p = (Drbd_Protocol_Packet *)h;
int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
int p_want_lose, p_two_primaries;
@@ -2054,11 +2054,11 @@
if (drbd_recv(mdev, h->payload, h->length) != h->length)
return FALSE;
- p_proto = be32_to_cpu(p->protocol);
- p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
- p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
- p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
- p_want_lose = be32_to_cpu(p->want_lose);
+ p_proto = be32_to_cpu(p->protocol);
+ p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
+ p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
+ p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
+ p_want_lose = be32_to_cpu(p->want_lose);
p_two_primaries = be32_to_cpu(p->two_primaries);
if (p_proto != mdev->net_conf->wire_protocol) {
@@ -2101,14 +2101,14 @@
STATIC int receive_SyncParam(drbd_dev *mdev, Drbd_Header *h)
{
int ok = TRUE;
- Drbd_SyncParam_Packet *p = (Drbd_SyncParam_Packet*)h;
+ Drbd_SyncParam_Packet *p = (Drbd_SyncParam_Packet *)h;
ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
if (drbd_recv(mdev, h->payload, h->length) != h->length)
return FALSE;
/* XXX harmless race with ioctl ... */
- mdev->sync_conf.rate = be32_to_cpu(p->rate);
+ mdev->sync_conf.rate = be32_to_cpu(p->rate);
return ok;
}
@@ -2132,7 +2132,7 @@
STATIC int receive_sizes(drbd_dev *mdev, Drbd_Header *h)
{
- Drbd_Sizes_Packet *p = (Drbd_Sizes_Packet*)h;
+ Drbd_Sizes_Packet *p = (Drbd_Sizes_Packet *)h;
unsigned int max_seg_s;
sector_t p_size, p_usize, my_usize;
drbd_conns_t nconn;
@@ -2237,7 +2237,7 @@
STATIC int receive_uuids(drbd_dev *mdev, Drbd_Header *h)
{
- Drbd_GenCnt_Packet *p = (Drbd_GenCnt_Packet*)h;
+ Drbd_GenCnt_Packet *p = (Drbd_GenCnt_Packet *)h;
u64 *p_uuid;
int i;
@@ -2288,7 +2288,7 @@
STATIC int receive_req_state(drbd_dev *mdev, Drbd_Header *h)
{
- Drbd_Req_State_Packet *p = (Drbd_Req_State_Packet*)h;
+ Drbd_Req_State_Packet *p = (Drbd_Req_State_Packet *)h;
drbd_state_t mask, val;
int rv;
@@ -2316,7 +2316,7 @@
STATIC int receive_state(drbd_dev *mdev, Drbd_Header *h)
{
- Drbd_State_Packet *p = (Drbd_State_Packet*)h;
+ Drbd_State_Packet *p = (Drbd_State_Packet *)h;
drbd_conns_t nconn, oconn;
drbd_state_t os, ns, peer_state;
int rv;
@@ -2387,7 +2387,7 @@
STATIC int receive_sync_uuid(drbd_dev *mdev, Drbd_Header *h)
{
- Drbd_SyncUUID_Packet *p = (Drbd_SyncUUID_Packet*)h;
+ Drbd_SyncUUID_Packet *p = (Drbd_SyncUUID_Packet *)h;
wait_event( mdev->misc_wait,
mdev->state.conn < Connected || mdev->state.conn == WFSyncUUID);
@@ -2425,8 +2425,8 @@
drbd_bm_lock(mdev);
bm_words = drbd_bm_words(mdev);
- bm_i = 0;
- buffer = vmalloc(BM_PACKET_WORDS*sizeof(long));
+ bm_i = 0;
+ buffer = vmalloc(BM_PACKET_WORDS*sizeof(long));
while (1) {
num_words = min_t(size_t, BM_PACKET_WORDS, bm_words-bm_i );
@@ -2488,27 +2488,27 @@
return TRUE; /* cannot fail. */
}
-typedef int (*drbd_cmd_handler_f)(drbd_dev*, Drbd_Header*);
+typedef int (*drbd_cmd_handler_f)(drbd_dev *, Drbd_Header *);
static drbd_cmd_handler_f drbd_default_handler[] = {
- [Data] = receive_Data,
- [DataReply] = receive_DataReply,
- [RSDataReply] = receive_RSDataReply,
- [RecvAck] = NULL, /* via msock: got_RecvAck, */
- [WriteAck] = NULL, /* via msock: got_WriteAck, */
- [Barrier] = receive_Barrier_no_tcq,
- [BarrierAck] = NULL, /* via msock: got_BarrierAck, */
- [ReportBitMap] = receive_bitmap,
- [Ping] = NULL, /* via msock: got_Ping, */
- [PingAck] = NULL, /* via msock: got_PingAck, */
- [UnplugRemote] = receive_UnplugRemote,
- [DataRequest] = receive_DataRequest,
+ [Data] = receive_Data,
+ [DataReply] = receive_DataReply,
+ [RSDataReply] = receive_RSDataReply,
+ [RecvAck] = NULL, /* via msock: got_RecvAck, */
+ [WriteAck] = NULL, /* via msock: got_WriteAck, */
+ [Barrier] = receive_Barrier_no_tcq,
+ [BarrierAck] = NULL, /* via msock: got_BarrierAck, */
+ [ReportBitMap] = receive_bitmap,
+ [Ping] = NULL, /* via msock: got_Ping, */
+ [PingAck] = NULL, /* via msock: got_PingAck, */
+ [UnplugRemote] = receive_UnplugRemote,
+ [DataRequest] = receive_DataRequest,
[RSDataRequest] = receive_DataRequest, /* receive_RSDataRequest, */
- [SyncParam] = receive_SyncParam,
+ [SyncParam] = receive_SyncParam,
[ReportProtocol] = receive_protocol,
- [ReportUUIDs] = receive_uuids,
- [ReportSizes] = receive_sizes,
- [ReportState] = receive_state,
+ [ReportUUIDs] = receive_uuids,
+ [ReportSizes] = receive_sizes,
+ [ReportState] = receive_state,
[StateChgRequest] = receive_req_state,
[ReportSyncUUID] = receive_sync_uuid,
};
@@ -2565,7 +2565,7 @@
* Application READ requests
*/
spin_lock_irq(&mdev->req_lock);
- for(i = 0;i < APP_R_HSIZE;i++) {
+ for (i = 0; i < APP_R_HSIZE; i++) {
slot = mdev->app_reads_hash+i;
hlist_for_each_entry(req, n, slot, colision) {
list_add(&req->w.list, &workset);
@@ -2573,7 +2573,7 @@
}
memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
- while(!list_empty(&workset)) {
+ while (!list_empty(&workset)) {
le = workset.next;
req = list_entry(le, drbd_request_t, w.list);
list_del(le);
@@ -2623,15 +2623,15 @@
drbd_fail_pending_reads(mdev);
/* We do not have data structures that would allow us to
- get the rs_pending_cnt down to 0 again.
- * On SyncTarget we do not have any data structures describing
- the pending RSDataRequest's we have sent.
- * On SyncSource there is no data structure that tracks
- the RSDataReply blocks that we sent to the SyncTarget.
- And no, it is not the sum of the reference counts in the
- resync_LRU. The resync_LRU tracks the whole operation including
- the disk-IO, while the rs_pending_cnt only tracks the blocks
- on the fly. */
+ * get the rs_pending_cnt down to 0 again.
+ * * On SyncTarget we do not have any data structures describing
+ * the pending RSDataRequest's we have sent.
+ * * On SyncSource there is no data structure that tracks
+ * the RSDataReply blocks that we sent to the SyncTarget.
+ * And no, it is not the sum of the reference counts in the
+ * resync_LRU. The resync_LRU tracks the whole operation including
+ * the disk-IO, while the rs_pending_cnt only tracks the blocks
+ * on the fly. */
drbd_rs_cancel_all(mdev);
mdev->rs_total = 0;
mdev->rs_failed = 0;
@@ -2750,7 +2750,7 @@
memset(p, 0, sizeof(*p));
p->protocol_version = cpu_to_be32(PRO_VERSION);
ok = _drbd_send_cmd( mdev, mdev->data.socket, HandShake,
- (Drbd_Header *)p, sizeof(*p), 0 );
+ (Drbd_Header *)p, sizeof(*p), 0 );
up(&mdev->data.mutex);
return ok;
}
@@ -3033,9 +3033,9 @@
/* ********* acknowledge sender ******** */
-STATIC int got_RqSReply(drbd_dev *mdev, Drbd_Header* h)
+STATIC int got_RqSReply(drbd_dev *mdev, Drbd_Header *h)
{
- Drbd_RqS_Reply_Packet *p = (Drbd_RqS_Reply_Packet*)h;
+ Drbd_RqS_Reply_Packet *p = (Drbd_RqS_Reply_Packet *)h;
int retcode = be32_to_cpu(p->retcode);
@@ -3051,13 +3051,13 @@
return TRUE;
}
-STATIC int got_Ping(drbd_dev *mdev, Drbd_Header* h)
+STATIC int got_Ping(drbd_dev *mdev, Drbd_Header *h)
{
return drbd_send_ping_ack(mdev);
}
-STATIC int got_PingAck(drbd_dev *mdev, Drbd_Header* h)
+STATIC int got_PingAck(drbd_dev *mdev, Drbd_Header *h)
{
/* restore idle timeout */
mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
@@ -3065,10 +3065,10 @@
return TRUE;
}
-STATIC int got_BlockAck(drbd_dev *mdev, Drbd_Header* h)
+STATIC int got_BlockAck(drbd_dev *mdev, Drbd_Header *h)
{
drbd_request_t *req;
- Drbd_BlockAck_Packet *p = (Drbd_BlockAck_Packet*)h;
+ Drbd_BlockAck_Packet *p = (Drbd_BlockAck_Packet *)h;
sector_t sector = be64_to_cpu(p->sector);
int blksize = be32_to_cpu(p->blksize);
@@ -3117,9 +3117,9 @@
return TRUE;
}
-STATIC int got_NegAck(drbd_dev *mdev, Drbd_Header* h)
+STATIC int got_NegAck(drbd_dev *mdev, Drbd_Header *h)
{
- Drbd_BlockAck_Packet *p = (Drbd_BlockAck_Packet*)h;
+ Drbd_BlockAck_Packet *p = (Drbd_BlockAck_Packet *)h;
sector_t sector = be64_to_cpu(p->sector);
drbd_request_t *req;
@@ -3150,10 +3150,10 @@
return TRUE;
}
-STATIC int got_NegDReply(drbd_dev *mdev, Drbd_Header* h)
+STATIC int got_NegDReply(drbd_dev *mdev, Drbd_Header *h)
{
drbd_request_t *req;
- Drbd_BlockAck_Packet *p = (Drbd_BlockAck_Packet*)h;
+ Drbd_BlockAck_Packet *p = (Drbd_BlockAck_Packet *)h;
sector_t sector = be64_to_cpu(p->sector);
spin_lock_irq(&mdev->req_lock);
@@ -3178,11 +3178,11 @@
return TRUE;
}
-STATIC int got_NegRSDReply(drbd_dev *mdev, Drbd_Header* h)
+STATIC int got_NegRSDReply(drbd_dev *mdev, Drbd_Header *h)
{
sector_t sector;
int size;
- Drbd_BlockAck_Packet *p = (Drbd_BlockAck_Packet*)h;
+ Drbd_BlockAck_Packet *p = (Drbd_BlockAck_Packet *)h;
sector = be64_to_cpu(p->sector);
size = be32_to_cpu(p->blksize);
@@ -3199,9 +3199,9 @@
return TRUE;
}
-STATIC int got_BarrierAck(drbd_dev *mdev, Drbd_Header* h)
+STATIC int got_BarrierAck(drbd_dev *mdev, Drbd_Header *h)
{
- Drbd_BarrierAck_Packet *p = (Drbd_BarrierAck_Packet*)h;
+ Drbd_BarrierAck_Packet *p = (Drbd_BarrierAck_Packet *)h;
tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
dec_ap_pending(mdev);
@@ -3211,7 +3211,7 @@
struct asender_cmd {
size_t pkt_size;
- int (*process)(drbd_dev *mdev, Drbd_Header* h);
+ int (*process)(drbd_dev *mdev, Drbd_Header *h);
};
int drbd_asender(struct Drbd_thread *thi)
@@ -3227,16 +3227,16 @@
int empty;
static struct asender_cmd asender_tbl[] = {
- [Ping] = { sizeof(Drbd_Header), got_Ping },
- [PingAck] = { sizeof(Drbd_Header), got_PingAck },
- [RecvAck] = { sizeof(Drbd_BlockAck_Packet), got_BlockAck },
- [WriteAck] = { sizeof(Drbd_BlockAck_Packet), got_BlockAck },
- [RSWriteAck] = { sizeof(Drbd_BlockAck_Packet), got_BlockAck },
- [DiscardAck] = { sizeof(Drbd_BlockAck_Packet), got_BlockAck },
- [NegAck] = { sizeof(Drbd_BlockAck_Packet), got_NegAck },
- [NegDReply] = { sizeof(Drbd_BlockAck_Packet), got_NegDReply },
- [NegRSDReply] = { sizeof(Drbd_BlockAck_Packet), got_NegRSDReply},
- [BarrierAck] = { sizeof(Drbd_BarrierAck_Packet), got_BarrierAck },
+ [Ping] = { sizeof(Drbd_Header), got_Ping },
+ [PingAck] = { sizeof(Drbd_Header), got_PingAck },
+ [RecvAck] = { sizeof(Drbd_BlockAck_Packet), got_BlockAck },
+ [WriteAck] = { sizeof(Drbd_BlockAck_Packet), got_BlockAck },
+ [RSWriteAck] = { sizeof(Drbd_BlockAck_Packet), got_BlockAck },
+ [DiscardAck] = { sizeof(Drbd_BlockAck_Packet), got_BlockAck },
+ [NegAck] = { sizeof(Drbd_BlockAck_Packet), got_NegAck },
+ [NegDReply] = { sizeof(Drbd_BlockAck_Packet), got_NegDReply },
+ [NegRSDReply] = { sizeof(Drbd_BlockAck_Packet), got_NegRSDReply},
+ [BarrierAck] = { sizeof(Drbd_BarrierAck_Packet), got_BarrierAck },
[StateChgReply] = { sizeof(Drbd_RqS_Reply_Packet), got_RqSReply },
};
@@ -3252,7 +3252,7 @@
mdev->net_conf->ping_timeo*HZ/10;
}
- while(1) {
+ while (1) {
if (!drbd_process_done_ee(mdev)) {
ERR("process_done_ee() = NOT_OK\n");
goto err;
@@ -3276,18 +3276,18 @@
drbd_tcp_cork(mdev->meta.socket);
/* Note:
- * -EINTR (on meta) we got a signal
- * -EAGAIN (on meta) rcvtimeo expired
- * -ECONNRESET other side closed the connection
+ * -EINTR (on meta) we got a signal
+ * -EAGAIN (on meta) rcvtimeo expired
+ * -ECONNRESET other side closed the connection
* -ERESTARTSYS (on data) we got a signal
- * rv < 0 other than above: unexpected error!
+ * rv < 0 other than above: unexpected error!
* rv == expected: full header or command
* rv < expected: "woken" by signal during receive
- * rv == 0 : "connection shut down by peer"
+ * rv == 0 : "connection shut down by peer"
*/
if (likely(rv > 0)) {
received += rv;
- buf += rv;
+ buf += rv;
} else if (rv == 0) {
ERR("meta connection shut down by peer.\n");
goto err;
@@ -3326,10 +3326,10 @@
dump_packet(mdev, mdev->meta.socket, 1, (void *)h, __FILE__, __LINE__);
if (!asender_tbl[cmd].process(mdev, h)) goto err;
- buf = h;
+ buf = h;
received = 0;
- expect = sizeof(Drbd_Header);
- cmd = -1;
+ expect = sizeof(Drbd_Header);
+ cmd = -1;
}
}
Modified: branches/drbd-8.0-for-linus/drbd/drbd_req.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_req.c 2007-07-24 13:20:54 UTC (rev 2992)
+++ branches/drbd-8.0-for-linus/drbd/drbd_req.c 2007-07-24 13:25:49 UTC (rev 2993)
@@ -145,7 +145,7 @@
dec_local(mdev);
} else {
WARN("Should have called drbd_al_complete_io(, %llu), "
- "but my Disk seems to have failed:(\n",
+ "but my Disk seems to have failed :(\n",
(unsigned long long) req->sector);
}
}
@@ -439,7 +439,7 @@
print_req_mod(req, what);
- switch(what) {
+ switch (what) {
default:
ERR("LOGIC BUG in %s:%u\n", __FILE__ , __LINE__ );
return;
@@ -912,7 +912,7 @@
* make sure that, if this is a write request and it triggered a
* barrier packet, this request is queued within the same spinlock. */
if (remote && mdev->unused_spare_barrier &&
- test_and_clear_bit(ISSUE_BARRIER, &mdev->flags)) {
+ test_and_clear_bit(ISSUE_BARRIER, &mdev->flags)) {
struct drbd_barrier *b = mdev->unused_spare_barrier;
b = _tl_add_barrier(mdev, b);
mdev->unused_spare_barrier = NULL;
@@ -993,7 +993,7 @@
if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR :
( rw == READ ? DRBD_FAULT_DT_RD :
- DRBD_FAULT_DT_RA ) ))
+ DRBD_FAULT_DT_RA ) ))
bio_endio(req->private_bio, req->private_bio->bi_size, -EIO);
else
generic_make_request(req->private_bio);
@@ -1017,7 +1017,7 @@
* return 1
* otherwise return 0
*/
-static int drbd_fail_request_early(drbd_dev* mdev, int is_write)
+static int drbd_fail_request_early(drbd_dev *mdev, int is_write)
{
/* Unconfigured */
if (mdev->state.conn == Disconnecting &&
@@ -1058,7 +1058,7 @@
int drbd_make_request_26(request_queue_t *q, struct bio *bio)
{
unsigned int s_enr, e_enr;
- struct Drbd_Conf *mdev = (drbd_dev*) q->queuedata;
+ struct Drbd_Conf *mdev = (drbd_dev *) q->queuedata;
if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) {
bio_endio(bio, bio->bi_size, -EPERM);
@@ -1133,7 +1133,7 @@
* we should use DRBD_MAX_SEGMENT_SIZE instead of AL_EXTENT_SIZE */
int drbd_merge_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *bvec)
{
- struct Drbd_Conf *mdev = (drbd_dev*) q->queuedata;
+ struct Drbd_Conf *mdev = (drbd_dev *) q->queuedata;
unsigned int bio_offset = (unsigned int)bio->bi_sector << 9; /* 32 bit */
unsigned int bio_size = bio->bi_size;
int limit, backing_limit;
Modified: branches/drbd-8.0-for-linus/drbd/drbd_strings.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_strings.c 2007-07-24 13:20:54 UTC (rev 2992)
+++ branches/drbd-8.0-for-linus/drbd/drbd_strings.c 2007-07-24 13:25:49 UTC (rev 2993)
@@ -78,29 +78,26 @@
[-SS_ResyncRunning] = "Can not start resync since it is already active",
[-SS_AlreadyStandAlone] = "Can not disconnect a StandAlone device",
[-SS_CW_FailedByPeer] = "State changed was refused by peer node",
- [-SS_IsDiskLess] =
+ [-SS_IsDiskLess] =
"Device is diskless, the requesed operation requires a disk",
[-SS_DeviceInUse] = "Device is held open by someone"
};
const char *conns_to_name(drbd_conns_t s) {
/* enums are unsigned... */
- return s > PausedSyncT ? "TOO_LARGE"
- : drbd_conn_s_names[s];
+ return s > PausedSyncT ? "TOO_LARGE" : drbd_conn_s_names[s];
}
const char *roles_to_name(drbd_role_t s) {
- return s > Secondary ? "TOO_LARGE"
- : drbd_role_s_names[s];
+ return s > Secondary ? "TOO_LARGE" : drbd_role_s_names[s];
}
const char *disks_to_name(drbd_disks_t s) {
- return s > UpToDate ? "TOO_LARGE"
- : drbd_disk_s_names[s];
+ return s > UpToDate ? "TOO_LARGE" : drbd_disk_s_names[s];
}
const char *set_st_err_name(set_st_err_t err) {
return err < SS_DeviceInUse ? "TOO_SMALL" :
err > SS_TwoPrimaries ? "TOO_LARGE"
- : drbd_state_sw_errors[-err];
+ : drbd_state_sw_errors[-err];
}
Modified: branches/drbd-8.0-for-linus/drbd/drbd_worker.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/drbd_worker.c 2007-07-24 13:20:54 UTC (rev 2992)
+++ branches/drbd-8.0-for-linus/drbd/drbd_worker.c 2007-07-24 13:25:49 UTC (rev 2993)
@@ -213,8 +213,8 @@
/* to avoid recursion in _req_mod */
what = error
? (bio_data_dir(bio) == WRITE)
- ? write_completed_with_error
- : read_completed_with_error
+ ? write_completed_with_error
+ : read_completed_with_error
: completed_ok;
spin_lock_irqsave(&mdev->req_lock, flags);
_req_mod(req, what, error);
@@ -280,7 +280,7 @@
void resync_timer_fn(unsigned long data)
{
unsigned long flags;
- drbd_dev* mdev = (drbd_dev*) data;
+ drbd_dev *mdev = (drbd_dev *) data;
int queue;
spin_lock_irqsave(&mdev->req_lock, flags);
@@ -323,7 +323,7 @@
if (mdev->state.conn != SyncTarget)
ERR("%s in w_make_resync_request\n", conns_to_name(mdev->state.conn));
- number = SLEEP_TIME*mdev->sync_conf.rate / ((BM_BLOCK_SIZE/1024)*HZ);
+ number = SLEEP_TIME*mdev->sync_conf.rate / ((BM_BLOCK_SIZE/1024)*HZ);
if (atomic_read(&mdev->rs_pending_cnt) > number)
goto requeue;
@@ -339,7 +339,7 @@
return 1;
}
- for(i = 0;i < number;i++) {
+ for (i = 0; i < number; i++) {
next_sector:
size = BM_BLOCK_SIZE;
@@ -451,7 +451,7 @@
return 1;
}
-int drbd_resync_finished(drbd_dev* mdev)
+int drbd_resync_finished(drbd_dev *mdev)
{
unsigned long db, dt, dbdt;
int dstate, pdstate;
@@ -677,7 +677,7 @@
/* inc_ap_pending was done where this was queued.
* dec_ap_pending will be done in got_BarrierAck
* or (on connection loss) in w_clear_epoch. */
- ok = _drbd_send_cmd(mdev, mdev->data.socket, Barrier, (Drbd_Header*)p, sizeof(*p), 0);
+ ok = _drbd_send_cmd(mdev, mdev->data.socket, Barrier, (Drbd_Header *)p, sizeof(*p), 0);
drbd_put_data_sock(mdev);
return ok;
@@ -767,7 +767,7 @@
{
drbd_dev *odev = mdev;
- while(1) {
+ while (1) {
if (odev->sync_conf.after == -1) return 1;
odev = minor_to_mdev(odev->sync_conf.after);
ERR_IF(!odev) return 1;
@@ -824,14 +824,14 @@
return rv;
}
-void resume_next_sg(drbd_dev* mdev)
+void resume_next_sg(drbd_dev *mdev)
{
drbd_global_lock();
_drbd_resume_next(mdev);
drbd_global_unlock();
}
-void suspend_other_sg(drbd_dev* mdev)
+void suspend_other_sg(drbd_dev *mdev)
{
drbd_global_lock();
_drbd_pause_after(mdev);
@@ -1006,7 +1006,7 @@
list_splice_init(&mdev->data.work.q, &work_list);
spin_unlock_irq(&mdev->data.work.q_lock);
- while(!list_empty(&work_list)) {
+ while (!list_empty(&work_list)) {
w = list_entry(work_list.next, struct drbd_work, list);
list_del_init(&w->list);
w->cb(mdev, w, 1);
Modified: branches/drbd-8.0-for-linus/drbd/linux/drbd_nl.h
===================================================================
--- branches/drbd-8.0-for-linus/drbd/linux/drbd_nl.h 2007-07-24 13:20:54 UTC (rev 2992)
+++ branches/drbd-8.0-for-linus/drbd/linux/drbd_nl.h 2007-07-24 13:25:49 UTC (rev 2993)
@@ -1,7 +1,7 @@
/*
PAKET( name,
- TYPE ( pn, pr, member )
- ...
+ TYPE ( pn, pr, member )
+ ...
)
You may never reissue one of the pn arguments
Modified: branches/drbd-8.0-for-linus/drbd/lru_cache.c
===================================================================
--- branches/drbd-8.0-for-linus/drbd/lru_cache.c 2007-07-24 13:20:54 UTC (rev 2992)
+++ branches/drbd-8.0-for-linus/drbd/lru_cache.c 2007-07-24 13:25:49 UTC (rev 2993)
@@ -67,7 +67,7 @@
lc->new_number = -1;
lc->lc_private = private_p;
lc->name = name;
- for(i = 0;i < e_count;i++) {
+ for (i = 0; i < e_count; i++) {
e = lc_entry(lc, i);
e->lc_number = LC_FREE;
list_add(&e->list, &lc->free);
@@ -354,7 +354,7 @@
int i;
seq_printf(seq, "\tnn: lc_number refcnt %s\n ", utext);
- for(i = 0;i < nr_elements;i++) {
+ for (i = 0; i < nr_elements; i++) {
e = lc_entry(lc, i);
if (e->lc_number == LC_FREE) {
seq_printf(seq, "\t%2d: FREE\n", i );
Modified: branches/drbd-8.0-for-linus/drbd/lru_cache.h
===================================================================
--- branches/drbd-8.0-for-linus/drbd/lru_cache.h 2007-07-24 13:20:54 UTC (rev 2992)
+++ branches/drbd-8.0-for-linus/drbd/lru_cache.h 2007-07-24 13:25:49 UTC (rev 2993)
@@ -33,13 +33,13 @@
Once created, the api consists of
lc_find(,nr) -- finds the object with the given number, if present
lc_get(,nr) -- finds the object and increases the usage count
- if not present, actions are taken to make sure that
+ if not present, actions are taken to make sure that
the cache is updated, the user is notified of this by a callback.
Return value is NULL in this case.
As soon as the user informs the cache that it has been updated,
the next lc_get on that very object number will be successfull.
lc_put(,lc_element*)
- -- decreases the usage count of this object, and returns the new value.
+ -- decreases the usage count of this object, and returns the new value.
NOTE: It is the USERS responsibility to make sure that calls do not happen concurrently.
*/
@@ -60,7 +60,7 @@
struct lc_element {
struct hlist_node colision;
- struct list_head list; /* LRU list or free list */
+ struct list_head list; /* LRU list or free list */
unsigned int refcnt;
unsigned int lc_number;
};
@@ -107,8 +107,8 @@
extern struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr);
extern struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr);
extern struct lc_element *lc_get (struct lru_cache *lc, unsigned int enr);
-extern unsigned int lc_put (struct lru_cache *lc, struct lc_element *e);
-extern void lc_changed(struct lru_cache *lc, struct lc_element *e);
+extern unsigned int lc_put (struct lru_cache *lc, struct lc_element *e);
+extern void lc_changed(struct lru_cache *lc, struct lc_element *e);
struct seq_file;
extern size_t lc_printf_stats(struct seq_file *seq, struct lru_cache *lc);
@@ -141,7 +141,7 @@
#define lc_e_base(lc) ((char *) ( (lc)->slot + (lc)->nr_elements ) )
#define lc_entry(lc, i) ((struct lc_element *) \
- (lc_e_base(lc) + (i)*(lc)->element_size))
+ (lc_e_base(lc) + (i)*(lc)->element_size))
#define lc_index_of(lc, e) (((char *)(e) - lc_e_base(lc))/(lc)->element_size)
#endif
More information about the drbd-cvs
mailing list