Lines matching full:pp (every use of struct smu_private *pp across the SMU character-device file operations: open, write, read, poll and release)

1081 struct smu_private *pp; in smu_open() local
1084 pp = kzalloc(sizeof(struct smu_private), GFP_KERNEL); in smu_open()
1085 if (!pp) in smu_open()
1087 spin_lock_init(&pp->lock); in smu_open()
1088 pp->mode = smu_file_commands; in smu_open()
1089 init_waitqueue_head(&pp->wait); in smu_open()
1093 list_add(&pp->list, &smu_clist); in smu_open()
1095 file->private_data = pp; in smu_open()
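
The smu_open() matches allocate and initialise the per-open-file context and link it onto the global smu_clist. Taken together, the fields touched throughout this listing (list, mode, busy, lock, wait, cmd, buffer) imply the shape of that context. A minimal sketch of struct smu_private, reconstructed only from those references and assuming the buffer is sized SMU_MAX_DATA as line 1156 suggests (the driver's real definition may differ):

	/* Hypothetical reconstruction from the fields referenced in this
	 * listing; not the driver's literal definition. struct smu_cmd and
	 * SMU_MAX_DATA are assumed to come from the driver's SMU header. */
	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/wait.h>

	struct smu_private {
		struct list_head	list;	/* linked onto smu_clist in smu_open() */
		struct smu_cmd		cmd;	/* in-flight command descriptor */
		spinlock_t		lock;	/* protects busy, mode and cmd.status */
		int			mode;	/* smu_file_commands / smu_file_events / smu_file_closing */
		int			busy;	/* a command has been queued and not yet consumed */
		wait_queue_head_t	wait;	/* woken from smu_user_cmd_done() */
		u8			buffer[SMU_MAX_DATA];	/* shared request/reply buffer */
	};
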
1104 struct smu_private *pp = misc; in smu_user_cmd_done() local
1106 wake_up_all(&pp->wait); in smu_user_cmd_done()
1113 struct smu_private *pp = file->private_data; in smu_write() local
1118 if (pp->busy) in smu_write()
1123 pp->mode = smu_file_events; in smu_write()
1135 else if (pp->mode != smu_file_commands) in smu_write()
1140 spin_lock_irqsave(&pp->lock, flags); in smu_write()
1141 if (pp->busy) { in smu_write()
1142 spin_unlock_irqrestore(&pp->lock, flags); in smu_write()
1145 pp->busy = 1; in smu_write()
1146 pp->cmd.status = 1; in smu_write()
1147 spin_unlock_irqrestore(&pp->lock, flags); in smu_write()
1149 if (copy_from_user(pp->buffer, buf + sizeof(hdr), hdr.data_len)) { in smu_write()
1150 pp->busy = 0; in smu_write()
1154 pp->cmd.cmd = hdr.cmd; in smu_write()
1155 pp->cmd.data_len = hdr.data_len; in smu_write()
1156 pp->cmd.reply_len = SMU_MAX_DATA; in smu_write()
1157 pp->cmd.data_buf = pp->buffer; in smu_write()
1158 pp->cmd.reply_buf = pp->buffer; in smu_write()
1159 pp->cmd.done = smu_user_cmd_done; in smu_write()
1160 pp->cmd.misc = pp; in smu_write()
1161 rc = smu_queue_cmd(&pp->cmd); in smu_write()
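
The smu_write() matches spell out the user-space contract for command mode: the caller writes a small header (hdr.cmd, hdr.data_len) followed by hdr.data_len bytes of payload; the driver copies the payload into pp->buffer, marks the file busy, fills in pp->cmd (with pp->buffer doing double duty as request and reply buffer and smu_user_cmd_done() as the completion callback), and queues it with smu_queue_cmd(). A hedged user-space sketch of that flow; the /dev/smu path, the struct smu_user_cmd_hdr layout and the command byte are assumptions for illustration, not confirmed by this listing:

	/* User-space sketch: submit one command and read back the reply.
	 * Device path, header layout and command byte are assumptions. */
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	struct smu_user_cmd_hdr {	/* assumed layout */
		uint32_t cmdtype;
		uint8_t  cmd;		/* SMU command byte */
		uint8_t  pad[3];
		uint32_t data_len;	/* payload bytes following the header */
	};

	int main(void)
	{
		uint8_t req[sizeof(struct smu_user_cmd_hdr) + 2];
		uint8_t reply[4096];
		struct smu_user_cmd_hdr hdr = { .cmd = 0x00 /* placeholder */, .data_len = 2 };
		ssize_t n;

		int fd = open("/dev/smu", O_RDWR);	/* assumed device node */
		if (fd < 0) {
			perror("open");
			return 1;
		}

		memcpy(req, &hdr, sizeof(hdr));
		req[sizeof(hdr)]     = 0x01;		/* placeholder payload */
		req[sizeof(hdr) + 1] = 0x00;

		if (write(fd, req, sizeof(hdr) + hdr.data_len) < 0)	/* enters smu_write() */
			perror("write");
		else if ((n = read(fd, reply, sizeof(reply))) < 0)	/* smu_read_command() blocks */
			perror("read");
		else
			printf("reply: %zd bytes (header + data)\n", n);

		close(fd);
		return 0;
	}
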
1168 static ssize_t smu_read_command(struct file *file, struct smu_private *pp, in smu_read_command() argument
1176 if (!pp->busy) in smu_read_command()
1180 spin_lock_irqsave(&pp->lock, flags); in smu_read_command()
1181 if (pp->cmd.status == 1) { in smu_read_command()
1183 spin_unlock_irqrestore(&pp->lock, flags); in smu_read_command()
1186 add_wait_queue(&pp->wait, &wait); in smu_read_command()
1190 if (pp->cmd.status != 1) in smu_read_command()
1195 spin_unlock_irqrestore(&pp->lock, flags); in smu_read_command()
1197 spin_lock_irqsave(&pp->lock, flags); in smu_read_command()
1200 remove_wait_queue(&pp->wait, &wait); in smu_read_command()
1202 spin_unlock_irqrestore(&pp->lock, flags); in smu_read_command()
1205 if (pp->cmd.status != 0) in smu_read_command()
1206 pp->cmd.reply_len = 0; in smu_read_command()
1207 size = sizeof(hdr) + pp->cmd.reply_len; in smu_read_command()
1211 hdr.status = pp->cmd.status; in smu_read_command()
1212 hdr.reply_len = pp->cmd.reply_len; in smu_read_command()
1216 if (size && copy_to_user(buf + sizeof(hdr), pp->buffer, size)) in smu_read_command()
1218 pp->busy = 0; in smu_read_command()
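
The smu_read_command() matches trace the read side of the same transaction: if nothing is busy there is nothing to read; otherwise the caller parks on pp->wait under pp->lock until cmd.status leaves 1 (still in flight), then receives a reply header (status, reply_len) followed by the reply payload from pp->buffer, and busy is cleared. The open-coded wait they suggest is the classic wait-queue idiom; a generic kernel-side sketch of it, assuming the usual add_wait_queue()/schedule() pattern rather than the driver's exact elided lines (the helper name is made up here):

	/* Generic sketch of the wait loop implied by the matches above;
	 * the driver's own O_NONBLOCK handling and exact ordering are elided.
	 * struct smu_private is the context sketched earlier in this listing. */
	#include <linux/sched/signal.h>
	#include <linux/spinlock.h>
	#include <linux/wait.h>

	static int smu_wait_reply_sketch(struct smu_private *pp)
	{
		DECLARE_WAITQUEUE(wait, current);
		unsigned long flags;
		int rc = 0;

		spin_lock_irqsave(&pp->lock, flags);
		add_wait_queue(&pp->wait, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (pp->cmd.status != 1)	/* completion callback has run */
				break;
			if (signal_pending(current)) {
				rc = -ERESTARTSYS;
				break;
			}
			spin_unlock_irqrestore(&pp->lock, flags);
			schedule();			/* smu_user_cmd_done() wakes pp->wait */
			spin_lock_irqsave(&pp->lock, flags);
		}
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pp->wait, &wait);
		spin_unlock_irqrestore(&pp->lock, flags);
		return rc;
	}
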
1224 static ssize_t smu_read_events(struct file *file, struct smu_private *pp, in smu_read_events() argument
1236 struct smu_private *pp = file->private_data; in smu_read() local
1238 if (pp->mode == smu_file_commands) in smu_read()
1239 return smu_read_command(file, pp, buf, count); in smu_read()
1240 if (pp->mode == smu_file_events) in smu_read()
1241 return smu_read_events(file, pp, buf, count); in smu_read()
1248 struct smu_private *pp = file->private_data; in smu_fpoll() local
1252 if (!pp) in smu_fpoll()
1255 if (pp->mode == smu_file_commands) { in smu_fpoll()
1256 poll_wait(file, &pp->wait, wait); in smu_fpoll()
1258 spin_lock_irqsave(&pp->lock, flags); in smu_fpoll()
1259 if (pp->busy && pp->cmd.status != 1) in smu_fpoll()
1261 spin_unlock_irqrestore(&pp->lock, flags); in smu_fpoll()
1263 if (pp->mode == smu_file_events) { in smu_fpoll()
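
smu_fpoll() only signals readiness for a command-mode file once a command is both outstanding and finished (pp->busy set and cmd.status no longer 1), so a client that does not want to block in read() can multiplex on the descriptor instead. A small user-space sketch with a made-up helper name, again treating /dev/smu as an assumed path and reusing the fd from the write example above:

	/* Wait (with timeout) until the previously written command has a
	 * reply ready, i.e. until poll() reports the fd readable. */
	#include <poll.h>

	static int smu_wait_readable(int fd, int timeout_ms)
	{
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		int rc = poll(&pfd, 1, timeout_ms);

		if (rc < 0)
			return -1;				/* poll error */
		if (rc == 0)
			return 0;				/* timed out */
		return (pfd.revents & POLLIN) ? 1 : 0;		/* 1: read() will not block */
	}
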
1271 struct smu_private *pp = file->private_data; in smu_release() local
1275 if (!pp) in smu_release()
1281 spin_lock_irqsave(&pp->lock, flags); in smu_release()
1282 pp->mode = smu_file_closing; in smu_release()
1283 busy = pp->busy; in smu_release()
1286 if (busy && pp->cmd.status == 1) { in smu_release()
1289 add_wait_queue(&pp->wait, &wait); in smu_release()
1292 if (pp->cmd.status != 1) in smu_release()
1294 spin_unlock_irqrestore(&pp->lock, flags); in smu_release()
1296 spin_lock_irqsave(&pp->lock, flags); in smu_release()
1299 remove_wait_queue(&pp->wait, &wait); in smu_release()
1301 spin_unlock_irqrestore(&pp->lock, flags); in smu_release()
1304 list_del(&pp->list); in smu_release()
1306 kfree(pp); in smu_release()