Lines Matching +full:1 +full:q

42 " lgr 1,%[schid]\n" in do_siga_sync()
50 : CC_CLOBBER_LIST("0", "1", "2", "3")); in do_siga_sync()
61 " lgr 1,%[schid]\n" in do_siga_input()
67 : CC_CLOBBER_LIST("0", "1", "2")); in do_siga_input()
90 " lgr 1,%[schid]\n" in do_siga_output()
98 : CC_CLOBBER_LIST("0", "1", "2", "3")); in do_siga_output()
105 * @q: queue to manipulate
114 static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, in qdio_do_eqbs() argument
117 int tmp_count = count, tmp_start = start, nr = q->nr; in qdio_do_eqbs()
120 qperf_inc(q, eqbs); in qdio_do_eqbs()
122 if (!q->is_input_q) in qdio_do_eqbs()
123 nr += q->irq_ptr->nr_input_qs; in qdio_do_eqbs()
125 ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count, in qdio_do_eqbs()
135 qperf_inc(q, eqbs_partial); in qdio_do_eqbs()
136 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x", in qdio_do_eqbs()
141 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); in qdio_do_eqbs()
144 DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); in qdio_do_eqbs()
145 DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); in qdio_do_eqbs()
147 q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr, in qdio_do_eqbs()
148 q->first_to_check, count, q->irq_ptr->int_parm); in qdio_do_eqbs()
155 * @q: queue to manipulate
164 static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, in qdio_do_sqbs() argument
169 int nr = q->nr; in qdio_do_sqbs()
171 qperf_inc(q, sqbs); in qdio_do_sqbs()
173 if (!q->is_input_q) in qdio_do_sqbs()
174 nr += q->irq_ptr->nr_input_qs; in qdio_do_sqbs()
176 ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); in qdio_do_sqbs()
186 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq); in qdio_do_sqbs()
187 qperf_inc(q, sqbs_partial); in qdio_do_sqbs()
190 DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); in qdio_do_sqbs()
191 DBF_ERROR("%4x SQBS ERROR", SCH_NO(q)); in qdio_do_sqbs()
193 q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr, in qdio_do_sqbs()
194 q->first_to_check, count, q->irq_ptr->int_parm); in qdio_do_sqbs()
203 static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, in get_buf_states() argument
208 int i = 1; in get_buf_states()
210 if (is_qebsm(q)) in get_buf_states()
211 return qdio_do_eqbs(q, state, bufnr, count, auto_ack); in get_buf_states()
214 __state = q->slsb.val[bufnr]; in get_buf_states()
224 if (q->slsb.val[bufnr] != __state) in get_buf_states()
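
The matches at 203-224 above are the non-QEBSM path of get_buf_states(): the SLSB entry of the first buffer is read, and the scan then counts how many of the following buffers, modulo the 128-entry ring, still hold that same state. A minimal stand-alone sketch of that scan, assuming a plain array in place of q->slsb.val[] (the names here are illustrative, not the driver's):

#define QDIO_MAX_BUFFERS_PER_Q	128
#define QDIO_BUFNR(num)		((num) & (QDIO_MAX_BUFFERS_PER_Q - 1))

/* Hypothetical stand-alone model of the non-QEBSM SLSB scan. */
static int count_same_state(const unsigned char *slsb, unsigned int bufnr,
			    unsigned char *state, unsigned int count)
{
	unsigned char first = slsb[bufnr];
	unsigned int i = 1;

	for (; i < count; i++) {
		bufnr = QDIO_BUFNR(bufnr + 1);
		/* stop as soon as a buffer's state differs from the first one */
		if (slsb[bufnr] != first)
			break;
	}

	*state = first;
	return i;	/* length of the run of identically-flagged buffers */
}
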
233 static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, in get_buf_state() argument
236 return get_buf_states(q, bufnr, state, 1, auto_ack); in get_buf_state()
240 static inline int set_buf_states(struct qdio_q *q, int bufnr, in set_buf_states() argument
245 if (is_qebsm(q)) in set_buf_states()
246 return qdio_do_sqbs(q, state, bufnr, count); in set_buf_states()
252 WRITE_ONCE(q->slsb.val[bufnr], state); in set_buf_states()
262 static inline int set_buf_state(struct qdio_q *q, int bufnr, in set_buf_state() argument
265 return set_buf_states(q, bufnr, state, 1); in set_buf_state()
271 struct qdio_q *q; in qdio_init_buf_states() local
274 for_each_input_queue(irq_ptr, q, i) in qdio_init_buf_states()
275 set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT, in qdio_init_buf_states()
277 for_each_output_queue(irq_ptr, q, i) in qdio_init_buf_states()
278 set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT, in qdio_init_buf_states()
282 static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output, in qdio_siga_sync() argument
285 unsigned long schid = *((u32 *) &q->irq_ptr->schid); in qdio_siga_sync()
289 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr); in qdio_siga_sync()
290 qperf_inc(q, siga_sync); in qdio_siga_sync()
292 if (is_qebsm(q)) { in qdio_siga_sync()
293 schid = q->irq_ptr->sch_token; in qdio_siga_sync()
299 DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc); in qdio_siga_sync()
303 static inline int qdio_sync_input_queue(struct qdio_q *q) in qdio_sync_input_queue() argument
305 return qdio_siga_sync(q, 0, q->mask); in qdio_sync_input_queue()
308 static inline int qdio_sync_output_queue(struct qdio_q *q) in qdio_sync_output_queue() argument
310 return qdio_siga_sync(q, q->mask, 0); in qdio_sync_output_queue()
313 static inline int qdio_siga_sync_q(struct qdio_q *q) in qdio_siga_sync_q() argument
315 if (q->is_input_q) in qdio_siga_sync_q()
316 return qdio_sync_input_queue(q); in qdio_siga_sync_q()
318 return qdio_sync_output_queue(q); in qdio_siga_sync_q()
321 static int qdio_siga_output(struct qdio_q *q, unsigned int count, in qdio_siga_output() argument
324 unsigned long schid = *((u32 *) &q->irq_ptr->schid); in qdio_siga_output()
329 if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) { in qdio_siga_output()
330 if (count > 1) in qdio_siga_output()
336 if (is_qebsm(q)) { in qdio_siga_output()
337 schid = q->irq_ptr->sch_token; in qdio_siga_output()
341 cc = do_siga_output(schid, q->mask, busy_bit, fc, aob); in qdio_siga_output()
355 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, in qdio_siga_output()
356 "%4x cc2 BB1:%1d", SCH_NO(q), q->nr); in qdio_siga_output()
357 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries); in qdio_siga_output()
362 static inline int qdio_siga_input(struct qdio_q *q) in qdio_siga_input() argument
364 unsigned long schid = *((u32 *) &q->irq_ptr->schid); in qdio_siga_input()
368 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr); in qdio_siga_input()
369 qperf_inc(q, siga_read); in qdio_siga_input()
371 if (is_qebsm(q)) { in qdio_siga_input()
372 schid = q->irq_ptr->sch_token; in qdio_siga_input()
376 cc = do_siga_input(schid, q->mask, fc); in qdio_siga_input()
378 DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc); in qdio_siga_input()
382 int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, in debug_get_buf_state() argument
385 if (qdio_need_siga_sync(q->irq_ptr)) in debug_get_buf_state()
386 qdio_siga_sync_q(q); in debug_get_buf_state()
387 return get_buf_state(q, bufnr, state, 0); in debug_get_buf_state()
390 static inline void qdio_stop_polling(struct qdio_q *q) in qdio_stop_polling() argument
392 if (!q->u.in.batch_count) in qdio_stop_polling()
395 qperf_inc(q, stop_polling); in qdio_stop_polling()
398 set_buf_states(q, q->u.in.batch_start, SLSB_P_INPUT_NOT_INIT, in qdio_stop_polling()
399 q->u.in.batch_count); in qdio_stop_polling()
400 q->u.in.batch_count = 0; in qdio_stop_polling()
403 static inline void account_sbals(struct qdio_q *q, unsigned int count) in account_sbals() argument
405 q->q_stats.nr_sbal_total += count; in account_sbals()
406 q->q_stats.nr_sbals[ilog2(count)]++; in account_sbals()
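
The account_sbals() matches (403-406) show that the per-queue statistics keep both a running total and a log2 histogram of how many buffers each completed run contained. A small stand-alone illustration of that bucketing, with a local ilog2 helper standing in for the kernel's:

struct sbal_stats {
	unsigned long nr_sbal_total;
	unsigned long nr_sbals[8];	/* one bucket per power of two, runs of 1..128 */
};

static unsigned int ilog2_u(unsigned int n)	/* floor(log2(n)), n > 0 */
{
	unsigned int r = 0;

	while (n >>= 1)
		r++;
	return r;
}

static void account_sbals_model(struct sbal_stats *s, unsigned int count)
{
	s->nr_sbal_total += count;
	s->nr_sbals[ilog2_u(count)]++;	/* count 1 -> bucket 0, 128 -> bucket 7 */
}
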
409 static void process_buffer_error(struct qdio_q *q, unsigned int start, in process_buffer_error() argument
413 if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q && in process_buffer_error()
414 q->sbal[start]->element[15].sflags == 0x10) { in process_buffer_error()
415 qperf_inc(q, target_full); in process_buffer_error()
416 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start); in process_buffer_error()
420 DBF_ERROR("%4x BUF ERROR", SCH_NO(q)); in process_buffer_error()
421 DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr); in process_buffer_error()
424 q->sbal[start]->element[14].sflags, in process_buffer_error()
425 q->sbal[start]->element[15].sflags); in process_buffer_error()
428 static inline void inbound_handle_work(struct qdio_q *q, unsigned int start, in inbound_handle_work() argument
433 set_buf_state(q, add_buf(start, count - 1), SLSB_P_INPUT_ACK); in inbound_handle_work()
435 if (!q->u.in.batch_count) in inbound_handle_work()
436 q->u.in.batch_start = start; in inbound_handle_work()
437 q->u.in.batch_count += count; in inbound_handle_work()
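
The inbound_handle_work() matches (428-437) acknowledge the newest buffer of a completed run and fold the run into the queue's ACK batch, which qdio_stop_polling() later resets in one go. A simplified stand-alone model of that bookkeeping; ack_newest_buffer() stands in for the set_buf_state(..., SLSB_P_INPUT_ACK) call, and the !auto_ack guard is assumed from the surrounding driver code rather than shown in the matches:

#define QDIO_MAX_BUFFERS_PER_Q	128
#define QDIO_BUFNR(num)		((num) & (QDIO_MAX_BUFFERS_PER_Q - 1))

struct in_batch {
	unsigned int start;	/* first buffer of the pending ACK batch */
	unsigned int count;	/* number of buffers in the batch */
};

static void ack_newest_buffer(unsigned int bufnr)
{
	(void)bufnr;		/* stand-in for set_buf_state(q, bufnr, SLSB_P_INPUT_ACK) */
}

static void inbound_handle_work_model(struct in_batch *b, unsigned int start,
				      unsigned int count, int auto_ack)
{
	if (!auto_ack)		/* assumed guard: with QEBSM the buffers are already acked */
		ack_newest_buffer(QDIO_BUFNR(start + count - 1));

	if (!b->count)		/* first run since the batch was last flushed */
		b->start = start;
	b->count += count;
}
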
440 static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start, in get_inbound_buffer_frontier() argument
446 q->timestamp = get_tod_clock_fast(); in get_inbound_buffer_frontier()
448 count = atomic_read(&q->nr_buf_used); in get_inbound_buffer_frontier()
452 if (qdio_need_siga_sync(q->irq_ptr)) in get_inbound_buffer_frontier()
453 qdio_sync_input_queue(q); in get_inbound_buffer_frontier()
455 count = get_buf_states(q, start, &state, count, 1); in get_inbound_buffer_frontier()
461 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr, in get_inbound_buffer_frontier()
464 inbound_handle_work(q, start, count, is_qebsm(q)); in get_inbound_buffer_frontier()
465 if (atomic_sub_return(count, &q->nr_buf_used) == 0) in get_inbound_buffer_frontier()
466 qperf_inc(q, inbound_queue_full); in get_inbound_buffer_frontier()
467 if (q->irq_ptr->perf_stat_enabled) in get_inbound_buffer_frontier()
468 account_sbals(q, count); in get_inbound_buffer_frontier()
471 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr, in get_inbound_buffer_frontier()
475 process_buffer_error(q, start, count); in get_inbound_buffer_frontier()
476 inbound_handle_work(q, start, count, false); in get_inbound_buffer_frontier()
477 if (atomic_sub_return(count, &q->nr_buf_used) == 0) in get_inbound_buffer_frontier()
478 qperf_inc(q, inbound_queue_full); in get_inbound_buffer_frontier()
479 if (q->irq_ptr->perf_stat_enabled) in get_inbound_buffer_frontier()
480 account_sbals_error(q, count); in get_inbound_buffer_frontier()
483 if (q->irq_ptr->perf_stat_enabled) in get_inbound_buffer_frontier()
484 q->q_stats.nr_sbal_nop++; in get_inbound_buffer_frontier()
485 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x", in get_inbound_buffer_frontier()
486 q->nr, start); in get_inbound_buffer_frontier()
492 dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1, in get_inbound_buffer_frontier()
494 state, start, q->nr); in get_inbound_buffer_frontier()
504 struct qdio_q *q; in qdio_inspect_input_queue() local
510 q = irq->input_qs[nr]; in qdio_inspect_input_queue()
511 start = q->first_to_check; in qdio_inspect_input_queue()
514 count = get_inbound_buffer_frontier(q, start, error); in qdio_inspect_input_queue()
519 q->first_to_check = add_buf(start, count); in qdio_inspect_input_queue()
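
The qdio_inspect_input_queue() matches (504-519) show the common inspect pattern: start at first_to_check, let the frontier helper report how many buffers completed, and advance first_to_check by that count modulo the ring. A stand-alone sketch of the pattern, with frontier() as a hypothetical stand-in for get_inbound_buffer_frontier():

#define QDIO_MAX_BUFFERS_PER_Q	128
#define QDIO_BUFNR(num)		((num) & (QDIO_MAX_BUFFERS_PER_Q - 1))

struct queue_model {
	unsigned int first_to_check;
};

/* hypothetical stand-in for get_inbound_buffer_frontier() */
static int frontier(struct queue_model *q, unsigned int start,
		    unsigned int *error)
{
	(void)q; (void)start; *error = 0;
	return 0;
}

static int inspect_queue_model(struct queue_model *q, unsigned int *bufnr,
			       unsigned int *error)
{
	unsigned int start = q->first_to_check;
	int count = frontier(q, start, error);

	*bufnr = start;
	if (count > 0)
		q->first_to_check = QDIO_BUFNR(start + count);

	return count;
}
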
524 static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start) in qdio_inbound_q_done() argument
528 if (!atomic_read(&q->nr_buf_used)) in qdio_inbound_q_done()
529 return 1; in qdio_inbound_q_done()
531 if (qdio_need_siga_sync(q->irq_ptr)) in qdio_inbound_q_done()
532 qdio_sync_input_queue(q); in qdio_inbound_q_done()
533 get_buf_state(q, start, &state, 0); in qdio_inbound_q_done()
539 return 1; in qdio_inbound_q_done()
542 static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start, in get_outbound_buffer_frontier() argument
548 q->timestamp = get_tod_clock_fast(); in get_outbound_buffer_frontier()
550 count = atomic_read(&q->nr_buf_used); in get_outbound_buffer_frontier()
554 if (qdio_need_siga_sync(q->irq_ptr)) in get_outbound_buffer_frontier()
555 qdio_sync_output_queue(q); in get_outbound_buffer_frontier()
557 count = get_buf_states(q, start, &state, count, 0); in get_outbound_buffer_frontier()
567 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, in get_outbound_buffer_frontier()
568 "out empty:%1d %02x", q->nr, count); in get_outbound_buffer_frontier()
570 atomic_sub(count, &q->nr_buf_used); in get_outbound_buffer_frontier()
571 if (q->irq_ptr->perf_stat_enabled) in get_outbound_buffer_frontier()
572 account_sbals(q, count); in get_outbound_buffer_frontier()
575 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out error:%1d %02x", in get_outbound_buffer_frontier()
576 q->nr, count); in get_outbound_buffer_frontier()
579 process_buffer_error(q, start, count); in get_outbound_buffer_frontier()
580 atomic_sub(count, &q->nr_buf_used); in get_outbound_buffer_frontier()
581 if (q->irq_ptr->perf_stat_enabled) in get_outbound_buffer_frontier()
582 account_sbals_error(q, count); in get_outbound_buffer_frontier()
586 if (q->irq_ptr->perf_stat_enabled) in get_outbound_buffer_frontier()
587 q->q_stats.nr_sbal_nop++; in get_outbound_buffer_frontier()
588 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", in get_outbound_buffer_frontier()
589 q->nr); in get_outbound_buffer_frontier()
596 dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1, in get_outbound_buffer_frontier()
598 state, start, q->nr); in get_outbound_buffer_frontier()
608 struct qdio_q *q; in qdio_inspect_output_queue() local
614 q = irq->output_qs[nr]; in qdio_inspect_output_queue()
615 start = q->first_to_check; in qdio_inspect_output_queue()
618 count = get_outbound_buffer_frontier(q, start, error); in qdio_inspect_output_queue()
623 q->first_to_check = add_buf(start, count); in qdio_inspect_output_queue()
628 static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count, in qdio_kick_outbound_q() argument
634 if (!qdio_need_siga_out(q->irq_ptr)) in qdio_kick_outbound_q()
637 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); in qdio_kick_outbound_q()
639 qperf_inc(q, siga_write); in qdio_kick_outbound_q()
641 cc = qdio_siga_output(q, count, &busy_bit, aob); in qdio_kick_outbound_q()
651 DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr); in qdio_kick_outbound_q()
654 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr); in qdio_kick_outbound_q()
658 case 1: in qdio_kick_outbound_q()
660 DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc); in qdio_kick_outbound_q()
665 DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr); in qdio_kick_outbound_q()
674 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state); in qdio_set_state()
732 if (dcc == 1) in qdio_establish_handle_irq()
791 else if (dcc == 1) in qdio_int_handler()
797 WARN_ON_ONCE(1); in qdio_int_handler()
978 DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs, in qdio_allocate()
1020 DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format); in qdio_trace_init_data()
1023 DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs, in qdio_trace_init_data()
1085 /* establish q */ in qdio_establish()
1204 * @q: queue containing the buffers
1208 static int handle_inbound(struct qdio_q *q, int bufnr, int count) in handle_inbound() argument
1212 qperf_inc(q, inbound_call); in handle_inbound()
1215 overlap = min_t(int, count - sub_buf(q->u.in.batch_start, bufnr), in handle_inbound()
1216 q->u.in.batch_count); in handle_inbound()
1218 q->u.in.batch_start = add_buf(q->u.in.batch_start, overlap); in handle_inbound()
1219 q->u.in.batch_count -= overlap; in handle_inbound()
1222 count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); in handle_inbound()
1223 atomic_add(count, &q->nr_buf_used); in handle_inbound()
1225 if (qdio_need_siga_in(q->irq_ptr)) in handle_inbound()
1226 return qdio_siga_input(q); in handle_inbound()
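
The handle_inbound() matches (1208-1226) first clip any overlap between the buffers being returned to the device and the pending ACK batch, then mark them SLSB_CU_INPUT_EMPTY and raise nr_buf_used. A simplified stand-alone model of just the overlap handling; sub_buf()/add_buf() are written out via the buffer mask and the names are illustrative:

#define QDIO_MAX_BUFFERS_PER_Q	128
#define QDIO_BUFNR(num)		((num) & (QDIO_MAX_BUFFERS_PER_Q - 1))

struct in_batch {			/* same shape as in the sketch above */
	unsigned int start;
	unsigned int count;
};

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

/*
 * When buffers bufnr..bufnr+count-1 are handed back to the device, any part
 * of that range still sitting in the ACK batch is dropped from the batch.
 */
static void clip_batch_overlap(struct in_batch *b, unsigned int bufnr, int count)
{
	int overlap = min_int(count - (int)QDIO_BUFNR(b->start - bufnr),
			      (int)b->count);

	if (overlap > 0) {
		b->start = QDIO_BUFNR(b->start + overlap);
		b->count -= overlap;
	}
}
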
1262 * @q: queue containing the buffers
1267 static int handle_outbound(struct qdio_q *q, unsigned int bufnr, unsigned int count, in handle_outbound() argument
1273 qperf_inc(q, outbound_call); in handle_outbound()
1275 count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count); in handle_outbound()
1276 used = atomic_add_return(count, &q->nr_buf_used); in handle_outbound()
1279 qperf_inc(q, outbound_queue_full); in handle_outbound()
1281 if (queue_type(q) == QDIO_IQDIO_QFMT) { in handle_outbound()
1285 rc = qdio_kick_outbound_q(q, count, phys_aob); in handle_outbound()
1286 } else if (qdio_need_siga_sync(q->irq_ptr)) { in handle_outbound()
1287 rc = qdio_sync_output_queue(q); in handle_outbound()
1289 get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 && in handle_outbound()
1292 qperf_inc(q, fast_requeue); in handle_outbound()
1294 rc = qdio_kick_outbound_q(q, count, 0); in handle_outbound()
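
The handle_outbound() matches (1267-1294) mark the buffers SLSB_CU_OUTPUT_PRIMED, bump nr_buf_used, and then decide whether a SIGA-write is needed: IQDIO (HiperSockets) queues are always kicked, queues that need SIGA-sync are synced instead, and when the previous buffer is still primed the new buffers are simply tacked on (fast_requeue). A hedged sketch of only that dispatch; the names and the exact fast-requeue guard are simplifications of the matched lines:

enum out_action {
	OUT_KICK,		/* issue SIGA-w for the new buffers */
	OUT_SYNC,		/* queue needs SIGA-sync instead */
	OUT_SKIP,		/* fast requeue: previous buffer still primed */
};

/* Hypothetical model of the dispatch at the end of handle_outbound(). */
static enum out_action decide_outbound(int is_iqdio_queue, int need_siga_sync,
					int prev_buf_still_primed)
{
	if (is_iqdio_queue)
		return OUT_KICK;	/* HiperSockets: always kick, possibly with an AOB */
	if (need_siga_sync)
		return OUT_SYNC;
	if (prev_buf_still_primed)
		return OUT_SKIP;	/* adapter will pick the new buffers up anyway */
	return OUT_KICK;
}
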
1337 * 1 - irqs not started since new data is available
1341 struct qdio_q *q; in qdio_start_irq() local
1348 for_each_input_queue(irq_ptr, q, i) in qdio_start_irq()
1349 qdio_stop_polling(q); in qdio_start_irq()
1360 for_each_input_queue(irq_ptr, q, i) { in qdio_start_irq()
1361 if (!qdio_inbound_q_done(q, q->first_to_check)) in qdio_start_irq()
1371 return 1; in qdio_start_irq()
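
The qdio_start_irq() matches (1337-1371) give its contract: flush the pending ACK batches via qdio_stop_polling(), re-check the input queues, and return 1 when data is still pending so the caller keeps polling instead of waiting for an interrupt. A simplified stand-alone model of that decision, omitting the indicator and poll-state handling the real function does in between:

#define NR_INPUT_QS_MODEL 4		/* illustrative number of input queues */

static void stop_polling_model(unsigned int nr)
{
	(void)nr;			/* stand-in for qdio_stop_polling(q) */
}

static int inbound_q_done_model(unsigned int nr)
{
	(void)nr;			/* stand-in for qdio_inbound_q_done(q, ...) */
	return 1;
}

static int start_irq_model(void)
{
	unsigned int i;

	for (i = 0; i < NR_INPUT_QS_MODEL; i++)
		stop_polling_model(i);		/* flush the pending ACK batches */

	/* re-check: if any queue still holds data, the caller must keep polling */
	for (i = 0; i < NR_INPUT_QS_MODEL; i++)
		if (!inbound_q_done_model(i))
			return 1;		/* irqs not started, new data available */

	return 0;				/* interrupts (re-)enabled */
}
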
1382 * 1 - interrupts successfully disabled
1394 return 1; in qdio_stop_irq()