Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/hw_random/Kconfig       |   2
-rw-r--r--  drivers/char/ipmi/Kconfig            |   7
-rw-r--r--  drivers/char/ipmi/Makefile           |   1
-rw-r--r--  drivers/char/ipmi/ipmi_ipmb.c        |   4
-rw-r--r--  drivers/char/ipmi/ipmi_kcs_sm.c      |  16
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c  | 605
-rw-r--r--  drivers/char/ipmi/ipmi_powernv.c     |  17
-rw-r--r--  drivers/char/ipmi/ipmi_si.h          |   7
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c     |  74
-rw-r--r--  drivers/char/ipmi/ipmi_si_ls2k.c     | 189
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c        |   4
-rw-r--r--  drivers/char/mem.c                   |  21
-rw-r--r--  drivers/char/tpm/Kconfig             |   9
-rw-r--r--  drivers/char/tpm/Makefile            |   1
-rw-r--r--  drivers/char/tpm/tpm_loongson.c      |  84
15 files changed, 702 insertions(+), 339 deletions(-)
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index c85827843447..e316cbc5baa9 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -77,7 +77,7 @@ config HW_RANDOM_AIROHA config HW_RANDOM_ATMEL tristate "Atmel Random Number Generator support" - depends on (ARCH_AT91 || COMPILE_TEST) + depends on (ARCH_MICROCHIP || COMPILE_TEST) default HW_RANDOM help This driver provides kernel-side support for the Random Number diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig index f4adc6feb3b2..92bed266d07c 100644 --- a/drivers/char/ipmi/Kconfig +++ b/drivers/char/ipmi/Kconfig @@ -84,6 +84,13 @@ config IPMI_IPMB bus, and it also supports direct messaging on the bus using IPMB direct messages. This module requires I2C support. +config IPMI_LS2K + bool 'Loongson-2K IPMI interface' + depends on LOONGARCH + select MFD_LS2K_BMC_CORE + help + Provides a driver for Loongson-2K IPMI interfaces. + config IPMI_POWERNV depends on PPC_POWERNV tristate 'POWERNV (OPAL firmware) IPMI interface' diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile index e0944547c9d0..4ea450a82242 100644 --- a/drivers/char/ipmi/Makefile +++ b/drivers/char/ipmi/Makefile @@ -8,6 +8,7 @@ ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o \ ipmi_si_mem_io.o ipmi_si-$(CONFIG_HAS_IOPORT) += ipmi_si_port_io.o ipmi_si-$(CONFIG_PCI) += ipmi_si_pci.o +ipmi_si-$(CONFIG_IPMI_LS2K) += ipmi_si_ls2k.o ipmi_si-$(CONFIG_PARISC) += ipmi_si_parisc.o obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c index 6a4f279c7c1f..3a51e58b2487 100644 --- a/drivers/char/ipmi/ipmi_ipmb.c +++ b/drivers/char/ipmi/ipmi_ipmb.c @@ -404,8 +404,7 @@ static void ipmi_ipmb_shutdown(void *send_info) ipmi_ipmb_stop_thread(iidev); } -static void ipmi_ipmb_sender(void *send_info, - struct ipmi_smi_msg *msg) +static int ipmi_ipmb_sender(void *send_info, struct ipmi_smi_msg *msg) { struct ipmi_ipmb_dev *iidev = send_info; unsigned long flags; @@ -417,6 +416,7 @@ static void ipmi_ipmb_sender(void *send_info, spin_unlock_irqrestore(&iidev->lock, flags); up(&iidev->wake_thread); + return IPMI_CC_NO_ERROR; } static void ipmi_ipmb_request_events(void *send_info) diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c index ecfcb50302f6..efda90dcf5b3 100644 --- a/drivers/char/ipmi/ipmi_kcs_sm.c +++ b/drivers/char/ipmi/ipmi_kcs_sm.c @@ -122,10 +122,10 @@ struct si_sm_data { unsigned long error0_timeout; }; -static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs, - struct si_sm_io *io, enum kcs_states state) +static unsigned int init_kcs_data(struct si_sm_data *kcs, + struct si_sm_io *io) { - kcs->state = state; + kcs->state = KCS_IDLE; kcs->io = io; kcs->write_pos = 0; kcs->write_count = 0; @@ -140,12 +140,6 @@ static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs, return 2; } -static unsigned int init_kcs_data(struct si_sm_data *kcs, - struct si_sm_io *io) -{ - return init_kcs_data_with_state(kcs, io, KCS_IDLE); -} - static inline unsigned char read_status(struct si_sm_data *kcs) { return kcs->io->inputb(kcs->io, 1); @@ -276,7 +270,7 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data, if (size > MAX_KCS_WRITE_SIZE) return IPMI_REQ_LEN_EXCEEDED_ERR; - if (kcs->state != KCS_IDLE) { + if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) { dev_warn(kcs->io->dev, "KCS in invalid state %d\n", kcs->state); return 
IPMI_NOT_IN_MY_STATE_ERR; } @@ -501,7 +495,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) } if (kcs->state == KCS_HOSED) { - init_kcs_data_with_state(kcs, kcs->io, KCS_ERROR0); + init_kcs_data(kcs, kcs->io); return SI_SM_HOSED; } diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 8e9050f99e9e..a0b67a35a5f0 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -38,7 +38,9 @@ #define IPMI_DRIVER_VERSION "39.2" -static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); +static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user); +static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg, + struct ipmi_user *user); static int ipmi_init_msghandler(void); static void smi_work(struct work_struct *t); static void handle_new_recv_msgs(struct ipmi_smi *intf); @@ -50,6 +52,8 @@ static void intf_free(struct kref *ref); static bool initialized; static bool drvregistered; +static struct timer_list ipmi_timer; + /* Numbers in this enumerator should be mapped to ipmi_panic_event_str */ enum ipmi_panic_event_op { IPMI_SEND_PANIC_EVENT_NONE, @@ -432,6 +436,7 @@ struct ipmi_smi { atomic_t nr_users; struct device_attribute nr_users_devattr; struct device_attribute nr_msgs_devattr; + struct device_attribute maintenance_mode_devattr; /* Used for wake ups at startup. */ @@ -464,7 +469,7 @@ struct ipmi_smi { * interface to match them up with their responses. A routine * is called periodically to time the items in this list. */ - spinlock_t seq_lock; + struct mutex seq_lock; struct seq_table seq_table[IPMI_IPMB_NUM_SEQ]; int curr_seq; @@ -539,7 +544,11 @@ struct ipmi_smi { /* For handling of maintenance mode. */ int maintenance_mode; - bool maintenance_mode_enable; + +#define IPMI_MAINTENANCE_MODE_STATE_OFF 0 +#define IPMI_MAINTENANCE_MODE_STATE_FIRMWARE 1 +#define IPMI_MAINTENANCE_MODE_STATE_RESET 2 + int maintenance_mode_state; int auto_maintenance_timeout; spinlock_t maintenance_mode_lock; /* Used in a timer... */ @@ -955,7 +964,6 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg) * risk. At this moment, simply skip it in that case. */ ipmi_free_recv_msg(msg); - atomic_dec(&msg->user->nr_msgs); } else { /* * Deliver it in smi_work. The message will hold a @@ -1116,12 +1124,11 @@ static int intf_find_seq(struct ipmi_smi *intf, struct ipmi_recv_msg **recv_msg) { int rv = -ENODEV; - unsigned long flags; if (seq >= IPMI_IPMB_NUM_SEQ) return -EINVAL; - spin_lock_irqsave(&intf->seq_lock, flags); + mutex_lock(&intf->seq_lock); if (intf->seq_table[seq].inuse) { struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg; @@ -1134,7 +1141,7 @@ static int intf_find_seq(struct ipmi_smi *intf, rv = 0; } } - spin_unlock_irqrestore(&intf->seq_lock, flags); + mutex_unlock(&intf->seq_lock); return rv; } @@ -1145,14 +1152,13 @@ static int intf_start_seq_timer(struct ipmi_smi *intf, long msgid) { int rv = -ENODEV; - unsigned long flags; unsigned char seq; unsigned long seqid; GET_SEQ_FROM_MSGID(msgid, seq, seqid); - spin_lock_irqsave(&intf->seq_lock, flags); + mutex_lock(&intf->seq_lock); /* * We do this verification because the user can be deleted * while a message is outstanding. 
@@ -1163,7 +1169,7 @@ static int intf_start_seq_timer(struct ipmi_smi *intf, ent->timeout = ent->orig_timeout; rv = 0; } - spin_unlock_irqrestore(&intf->seq_lock, flags); + mutex_unlock(&intf->seq_lock); return rv; } @@ -1174,7 +1180,6 @@ static int intf_err_seq(struct ipmi_smi *intf, unsigned int err) { int rv = -ENODEV; - unsigned long flags; unsigned char seq; unsigned long seqid; struct ipmi_recv_msg *msg = NULL; @@ -1182,7 +1187,7 @@ static int intf_err_seq(struct ipmi_smi *intf, GET_SEQ_FROM_MSGID(msgid, seq, seqid); - spin_lock_irqsave(&intf->seq_lock, flags); + mutex_lock(&intf->seq_lock); /* * We do this verification because the user can be deleted * while a message is outstanding. @@ -1196,7 +1201,7 @@ static int intf_err_seq(struct ipmi_smi *intf, msg = ent->recv_msg; rv = 0; } - spin_unlock_irqrestore(&intf->seq_lock, flags); + mutex_unlock(&intf->seq_lock); if (msg) deliver_err_response(intf, msg, err); @@ -1209,7 +1214,6 @@ int ipmi_create_user(unsigned int if_num, void *handler_data, struct ipmi_user **user) { - unsigned long flags; struct ipmi_user *new_user = NULL; int rv = 0; struct ipmi_smi *intf; @@ -1277,9 +1281,9 @@ int ipmi_create_user(unsigned int if_num, new_user->gets_events = false; mutex_lock(&intf->users_mutex); - spin_lock_irqsave(&intf->seq_lock, flags); + mutex_lock(&intf->seq_lock); list_add(&new_user->link, &intf->users); - spin_unlock_irqrestore(&intf->seq_lock, flags); + mutex_unlock(&intf->seq_lock); mutex_unlock(&intf->users_mutex); if (handler->ipmi_watchdog_pretimeout) @@ -1325,7 +1329,6 @@ static void _ipmi_destroy_user(struct ipmi_user *user) { struct ipmi_smi *intf = user->intf; int i; - unsigned long flags; struct cmd_rcvr *rcvr; struct cmd_rcvr *rcvrs = NULL; struct ipmi_recv_msg *msg, *msg2; @@ -1346,7 +1349,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user) list_del(&user->link); atomic_dec(&intf->nr_users); - spin_lock_irqsave(&intf->seq_lock, flags); + mutex_lock(&intf->seq_lock); for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { if (intf->seq_table[i].inuse && (intf->seq_table[i].recv_msg->user == user)) { @@ -1355,7 +1358,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user) ipmi_free_recv_msg(intf->seq_table[i].recv_msg); } } - spin_unlock_irqrestore(&intf->seq_lock, flags); + mutex_unlock(&intf->seq_lock); /* * Remove the user from the command receiver's table. First @@ -1534,8 +1537,15 @@ EXPORT_SYMBOL(ipmi_get_maintenance_mode); static void maintenance_mode_update(struct ipmi_smi *intf) { if (intf->handlers->set_maintenance_mode) + /* + * Lower level drivers only care about firmware mode + * as it affects their timing. They don't care about + * reset, which disables all commands for a while. + */ intf->handlers->set_maintenance_mode( - intf->send_info, intf->maintenance_mode_enable); + intf->send_info, + (intf->maintenance_mode_state == + IPMI_MAINTENANCE_MODE_STATE_FIRMWARE)); } int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode) @@ -1552,16 +1562,17 @@ int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode) if (intf->maintenance_mode != mode) { switch (mode) { case IPMI_MAINTENANCE_MODE_AUTO: - intf->maintenance_mode_enable - = (intf->auto_maintenance_timeout > 0); + /* Just leave it alone. 
*/ break; case IPMI_MAINTENANCE_MODE_OFF: - intf->maintenance_mode_enable = false; + intf->maintenance_mode_state = + IPMI_MAINTENANCE_MODE_STATE_OFF; break; case IPMI_MAINTENANCE_MODE_ON: - intf->maintenance_mode_enable = true; + intf->maintenance_mode_state = + IPMI_MAINTENANCE_MODE_STATE_FIRMWARE; break; default: @@ -1616,8 +1627,7 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val) } list_for_each_entry_safe(msg, msg2, &msgs, link) { - msg->user = user; - kref_get(&user->refcount); + ipmi_set_recv_msg_user(msg, user); deliver_local_response(intf, msg); } } @@ -1922,14 +1932,20 @@ static int i_ipmi_req_sysintf(struct ipmi_smi *intf, if (is_maintenance_mode_cmd(msg)) { unsigned long flags; + int newst; + + if (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST) + newst = IPMI_MAINTENANCE_MODE_STATE_FIRMWARE; + else + newst = IPMI_MAINTENANCE_MODE_STATE_RESET; spin_lock_irqsave(&intf->maintenance_mode_lock, flags); - intf->auto_maintenance_timeout - = maintenance_mode_timeout_ms; + intf->auto_maintenance_timeout = maintenance_mode_timeout_ms; if (!intf->maintenance_mode - && !intf->maintenance_mode_enable) { - intf->maintenance_mode_enable = true; + && intf->maintenance_mode_state < newst) { + intf->maintenance_mode_state = newst; maintenance_mode_update(intf); + mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); } spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags); @@ -1943,7 +1959,7 @@ static int i_ipmi_req_sysintf(struct ipmi_smi *intf, smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3); smi_msg->data[1] = msg->cmd; smi_msg->msgid = msgid; - smi_msg->user_data = recv_msg; + smi_msg->recv_msg = recv_msg; if (msg->data_len > 0) memcpy(&smi_msg->data[2], msg->data, msg->data_len); smi_msg->data_size = msg->data_len + 2; @@ -2024,12 +2040,9 @@ static int i_ipmi_req_ipmb(struct ipmi_smi *intf, * Save the receive message so we can use it * to deliver the response. */ - smi_msg->user_data = recv_msg; + smi_msg->recv_msg = recv_msg; } else { - /* It's a command, so get a sequence for it. */ - unsigned long flags; - - spin_lock_irqsave(&intf->seq_lock, flags); + mutex_lock(&intf->seq_lock); if (is_maintenance_mode_cmd(msg)) intf->ipmb_maintenance_mode_timeout = @@ -2087,7 +2100,7 @@ static int i_ipmi_req_ipmb(struct ipmi_smi *intf, * to be correct. */ out_err: - spin_unlock_irqrestore(&intf->seq_lock, flags); + mutex_unlock(&intf->seq_lock); } return rv; @@ -2140,7 +2153,7 @@ static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf, memcpy(smi_msg->data + 4, msg->data, msg->data_len); smi_msg->data_size = msg->data_len + 4; - smi_msg->user_data = recv_msg; + smi_msg->recv_msg = recv_msg; return 0; } @@ -2203,12 +2216,9 @@ static int i_ipmi_req_lan(struct ipmi_smi *intf, * Save the receive message so we can use it * to deliver the response. */ - smi_msg->user_data = recv_msg; + smi_msg->recv_msg = recv_msg; } else { - /* It's a command, so get a sequence for it. */ - unsigned long flags; - - spin_lock_irqsave(&intf->seq_lock, flags); + mutex_lock(&intf->seq_lock); /* * Create a sequence number with a 1 second @@ -2257,7 +2267,7 @@ static int i_ipmi_req_lan(struct ipmi_smi *intf, * to be correct. 
*/ out_err: - spin_unlock_irqrestore(&intf->seq_lock, flags); + mutex_unlock(&intf->seq_lock); } return rv; @@ -2288,22 +2298,15 @@ static int i_ipmi_request(struct ipmi_user *user, int run_to_completion = READ_ONCE(intf->run_to_completion); int rv = 0; - if (user) { - if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) { - /* Decrement will happen at the end of the routine. */ - rv = -EBUSY; - goto out; - } - } - - if (supplied_recv) + if (supplied_recv) { recv_msg = supplied_recv; - else { - recv_msg = ipmi_alloc_recv_msg(); - if (recv_msg == NULL) { - rv = -ENOMEM; - goto out; - } + recv_msg->user = user; + if (user) + atomic_inc(&user->nr_msgs); + } else { + recv_msg = ipmi_alloc_recv_msg(user); + if (IS_ERR(recv_msg)) + return PTR_ERR(recv_msg); } recv_msg->user_msg_data = user_msg_data; @@ -2314,22 +2317,22 @@ static int i_ipmi_request(struct ipmi_user *user, if (smi_msg == NULL) { if (!supplied_recv) ipmi_free_recv_msg(recv_msg); - rv = -ENOMEM; - goto out; + return -ENOMEM; } } if (!run_to_completion) mutex_lock(&intf->users_mutex); + if (intf->maintenance_mode_state == IPMI_MAINTENANCE_MODE_STATE_RESET) { + /* No messages while the BMC is in reset. */ + rv = -EBUSY; + goto out_err; + } if (intf->in_shutdown) { rv = -ENODEV; goto out_err; } - recv_msg->user = user; - if (user) - /* The put happens when the message is freed. */ - kref_get(&user->refcount); recv_msg->msgid = msgid; /* * Store the message to send in the receive message so timeout @@ -2358,8 +2361,10 @@ static int i_ipmi_request(struct ipmi_user *user, if (rv) { out_err: - ipmi_free_smi_msg(smi_msg); - ipmi_free_recv_msg(recv_msg); + if (!supplied_smi) + ipmi_free_smi_msg(smi_msg); + if (!supplied_recv) + ipmi_free_recv_msg(recv_msg); } else { dev_dbg(intf->si_dev, "Send: %*ph\n", smi_msg->data_size, smi_msg->data); @@ -2369,9 +2374,6 @@ out_err: if (!run_to_completion) mutex_unlock(&intf->users_mutex); -out: - if (rv && user) - atomic_dec(&user->nr_msgs); return rv; } @@ -2622,6 +2624,12 @@ retry_bmc_lock: (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry))) goto out_noprocessing; + /* Don't allow sysfs access when in maintenance mode. 
*/ + if (intf->maintenance_mode_state) { + rv = -EBUSY; + goto out_noprocessing; + } + prev_guid_set = bmc->dyn_guid_set; __get_guid(intf); @@ -3517,6 +3525,19 @@ static ssize_t nr_msgs_show(struct device *dev, } static DEVICE_ATTR_RO(nr_msgs); +static ssize_t maintenance_mode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ipmi_smi *intf = container_of(attr, + struct ipmi_smi, + maintenance_mode_devattr); + + return sysfs_emit(buf, "%u %d\n", intf->maintenance_mode_state, + intf->auto_maintenance_timeout); +} +static DEVICE_ATTR_RO(maintenance_mode); + static void redo_bmc_reg(struct work_struct *work) { struct ipmi_smi *intf = container_of(work, struct ipmi_smi, @@ -3575,7 +3596,7 @@ int ipmi_add_smi(struct module *owner, atomic_set(&intf->nr_users, 0); intf->handlers = handlers; intf->send_info = send_info; - spin_lock_init(&intf->seq_lock); + mutex_init(&intf->seq_lock); for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) { intf->seq_table[j].inuse = 0; intf->seq_table[j].seqid = 0; @@ -3653,6 +3674,14 @@ int ipmi_add_smi(struct module *owner, goto out_err_bmc_reg; } + intf->maintenance_mode_devattr = dev_attr_maintenance_mode; + sysfs_attr_init(&intf->maintenance_mode_devattr.attr); + rv = device_create_file(intf->si_dev, &intf->maintenance_mode_devattr); + if (rv) { + device_remove_file(intf->si_dev, &intf->nr_users_devattr); + goto out_err_bmc_reg; + } + intf->intf_num = i; mutex_unlock(&ipmi_interfaces_mutex); @@ -3760,6 +3789,7 @@ void ipmi_unregister_smi(struct ipmi_smi *intf) if (intf->handlers->shutdown) intf->handlers->shutdown(intf->send_info); + device_remove_file(intf->si_dev, &intf->maintenance_mode_devattr); device_remove_file(intf->si_dev, &intf->nr_msgs_devattr); device_remove_file(intf->si_dev, &intf->nr_users_devattr); @@ -3862,7 +3892,7 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, unsigned char chan; struct ipmi_user *user = NULL; struct ipmi_ipmb_addr *ipmb_addr; - struct ipmi_recv_msg *recv_msg; + struct ipmi_recv_msg *recv_msg = NULL; if (msg->rsp_size < 10) { /* Message not big enough, just ignore it. */ @@ -3883,9 +3913,8 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); if (rcvr) { user = rcvr->user; - kref_get(&user->refcount); - } else - user = NULL; + recv_msg = ipmi_alloc_recv_msg(user); + } rcu_read_unlock(); if (user == NULL) { @@ -3915,47 +3944,41 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf, * causes it to not be freed or queued. */ rv = -1; - } else { - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { - /* - * We couldn't allocate memory for the - * message, so requeue it for handling - * later. - */ - rv = 1; - kref_put(&user->refcount, free_ipmi_user); - } else { - /* Extract the source address from the data. */ - ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; - ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; - ipmb_addr->slave_addr = msg->rsp[6]; - ipmb_addr->lun = msg->rsp[7] & 3; - ipmb_addr->channel = msg->rsp[3] & 0xf; + } else if (!IS_ERR(recv_msg)) { + /* Extract the source address from the data. */ + ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr; + ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE; + ipmb_addr->slave_addr = msg->rsp[6]; + ipmb_addr->lun = msg->rsp[7] & 3; + ipmb_addr->channel = msg->rsp[3] & 0xf; - /* - * Extract the rest of the message information - * from the IPMB header. 
- */ - recv_msg->user = user; - recv_msg->recv_type = IPMI_CMD_RECV_TYPE; - recv_msg->msgid = msg->rsp[7] >> 2; - recv_msg->msg.netfn = msg->rsp[4] >> 2; - recv_msg->msg.cmd = msg->rsp[8]; - recv_msg->msg.data = recv_msg->msg_data; + /* + * Extract the rest of the message information + * from the IPMB header. + */ + recv_msg->recv_type = IPMI_CMD_RECV_TYPE; + recv_msg->msgid = msg->rsp[7] >> 2; + recv_msg->msg.netfn = msg->rsp[4] >> 2; + recv_msg->msg.cmd = msg->rsp[8]; + recv_msg->msg.data = recv_msg->msg_data; - /* - * We chop off 10, not 9 bytes because the checksum - * at the end also needs to be removed. - */ - recv_msg->msg.data_len = msg->rsp_size - 10; - memcpy(recv_msg->msg_data, &msg->rsp[9], - msg->rsp_size - 10); - if (deliver_response(intf, recv_msg)) - ipmi_inc_stat(intf, unhandled_commands); - else - ipmi_inc_stat(intf, handled_commands); - } + /* + * We chop off 10, not 9 bytes because the checksum + * at the end also needs to be removed. + */ + recv_msg->msg.data_len = msg->rsp_size - 10; + memcpy(recv_msg->msg_data, &msg->rsp[9], + msg->rsp_size - 10); + if (deliver_response(intf, recv_msg)) + ipmi_inc_stat(intf, unhandled_commands); + else + ipmi_inc_stat(intf, handled_commands); + } else { + /* + * We couldn't allocate memory for the message, so + * requeue it for handling later. + */ + rv = 1; } return rv; @@ -3968,7 +3991,7 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, int rv = 0; struct ipmi_user *user = NULL; struct ipmi_ipmb_direct_addr *daddr; - struct ipmi_recv_msg *recv_msg; + struct ipmi_recv_msg *recv_msg = NULL; unsigned char netfn = msg->rsp[0] >> 2; unsigned char cmd = msg->rsp[3]; @@ -3977,9 +4000,8 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, rcvr = find_cmd_rcvr(intf, netfn, cmd, 0); if (rcvr) { user = rcvr->user; - kref_get(&user->refcount); - } else - user = NULL; + recv_msg = ipmi_alloc_recv_msg(user); + } rcu_read_unlock(); if (user == NULL) { @@ -4001,44 +4023,38 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf, * causes it to not be freed or queued. */ rv = -1; - } else { - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { - /* - * We couldn't allocate memory for the - * message, so requeue it for handling - * later. - */ - rv = 1; - kref_put(&user->refcount, free_ipmi_user); - } else { - /* Extract the source address from the data. */ - daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr; - daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; - daddr->channel = 0; - daddr->slave_addr = msg->rsp[1]; - daddr->rs_lun = msg->rsp[0] & 3; - daddr->rq_lun = msg->rsp[2] & 3; + } else if (!IS_ERR(recv_msg)) { + /* Extract the source address from the data. */ + daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr; + daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE; + daddr->channel = 0; + daddr->slave_addr = msg->rsp[1]; + daddr->rs_lun = msg->rsp[0] & 3; + daddr->rq_lun = msg->rsp[2] & 3; - /* - * Extract the rest of the message information - * from the IPMB header. 
- */ - recv_msg->user = user; - recv_msg->recv_type = IPMI_CMD_RECV_TYPE; - recv_msg->msgid = (msg->rsp[2] >> 2); - recv_msg->msg.netfn = msg->rsp[0] >> 2; - recv_msg->msg.cmd = msg->rsp[3]; - recv_msg->msg.data = recv_msg->msg_data; - - recv_msg->msg.data_len = msg->rsp_size - 4; - memcpy(recv_msg->msg_data, msg->rsp + 4, - msg->rsp_size - 4); - if (deliver_response(intf, recv_msg)) - ipmi_inc_stat(intf, unhandled_commands); - else - ipmi_inc_stat(intf, handled_commands); - } + /* + * Extract the rest of the message information + * from the IPMB header. + */ + recv_msg->recv_type = IPMI_CMD_RECV_TYPE; + recv_msg->msgid = (msg->rsp[2] >> 2); + recv_msg->msg.netfn = msg->rsp[0] >> 2; + recv_msg->msg.cmd = msg->rsp[3]; + recv_msg->msg.data = recv_msg->msg_data; + + recv_msg->msg.data_len = msg->rsp_size - 4; + memcpy(recv_msg->msg_data, msg->rsp + 4, + msg->rsp_size - 4); + if (deliver_response(intf, recv_msg)) + ipmi_inc_stat(intf, unhandled_commands); + else + ipmi_inc_stat(intf, handled_commands); + } else { + /* + * We couldn't allocate memory for the message, so + * requeue it for handling later. + */ + rv = 1; } return rv; @@ -4050,7 +4066,7 @@ static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg; struct ipmi_ipmb_direct_addr *daddr; - recv_msg = msg->user_data; + recv_msg = msg->recv_msg; if (recv_msg == NULL) { dev_warn(intf->si_dev, "IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n"); @@ -4152,7 +4168,7 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, unsigned char chan; struct ipmi_user *user = NULL; struct ipmi_lan_addr *lan_addr; - struct ipmi_recv_msg *recv_msg; + struct ipmi_recv_msg *recv_msg = NULL; if (msg->rsp_size < 12) { /* Message not big enough, just ignore it. */ @@ -4173,9 +4189,8 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); if (rcvr) { user = rcvr->user; - kref_get(&user->refcount); - } else - user = NULL; + recv_msg = ipmi_alloc_recv_msg(user); + } rcu_read_unlock(); if (user == NULL) { @@ -4206,49 +4221,44 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf, * causes it to not be freed or queued. */ rv = -1; - } else { - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { - /* - * We couldn't allocate memory for the - * message, so requeue it for handling later. - */ - rv = 1; - kref_put(&user->refcount, free_ipmi_user); - } else { - /* Extract the source address from the data. */ - lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; - lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; - lan_addr->session_handle = msg->rsp[4]; - lan_addr->remote_SWID = msg->rsp[8]; - lan_addr->local_SWID = msg->rsp[5]; - lan_addr->lun = msg->rsp[9] & 3; - lan_addr->channel = msg->rsp[3] & 0xf; - lan_addr->privilege = msg->rsp[3] >> 4; + } else if (!IS_ERR(recv_msg)) { + /* Extract the source address from the data. */ + lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr; + lan_addr->addr_type = IPMI_LAN_ADDR_TYPE; + lan_addr->session_handle = msg->rsp[4]; + lan_addr->remote_SWID = msg->rsp[8]; + lan_addr->local_SWID = msg->rsp[5]; + lan_addr->lun = msg->rsp[9] & 3; + lan_addr->channel = msg->rsp[3] & 0xf; + lan_addr->privilege = msg->rsp[3] >> 4; - /* - * Extract the rest of the message information - * from the IPMB header. 
- */ - recv_msg->user = user; - recv_msg->recv_type = IPMI_CMD_RECV_TYPE; - recv_msg->msgid = msg->rsp[9] >> 2; - recv_msg->msg.netfn = msg->rsp[6] >> 2; - recv_msg->msg.cmd = msg->rsp[10]; - recv_msg->msg.data = recv_msg->msg_data; + /* + * Extract the rest of the message information + * from the IPMB header. + */ + recv_msg->recv_type = IPMI_CMD_RECV_TYPE; + recv_msg->msgid = msg->rsp[9] >> 2; + recv_msg->msg.netfn = msg->rsp[6] >> 2; + recv_msg->msg.cmd = msg->rsp[10]; + recv_msg->msg.data = recv_msg->msg_data; - /* - * We chop off 12, not 11 bytes because the checksum - * at the end also needs to be removed. - */ - recv_msg->msg.data_len = msg->rsp_size - 12; - memcpy(recv_msg->msg_data, &msg->rsp[11], - msg->rsp_size - 12); - if (deliver_response(intf, recv_msg)) - ipmi_inc_stat(intf, unhandled_commands); - else - ipmi_inc_stat(intf, handled_commands); - } + /* + * We chop off 12, not 11 bytes because the checksum + * at the end also needs to be removed. + */ + recv_msg->msg.data_len = msg->rsp_size - 12; + memcpy(recv_msg->msg_data, &msg->rsp[11], + msg->rsp_size - 12); + if (deliver_response(intf, recv_msg)) + ipmi_inc_stat(intf, unhandled_commands); + else + ipmi_inc_stat(intf, handled_commands); + } else { + /* + * We couldn't allocate memory for the message, so + * requeue it for handling later. + */ + rv = 1; } return rv; @@ -4270,7 +4280,7 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, unsigned char chan; struct ipmi_user *user = NULL; struct ipmi_system_interface_addr *smi_addr; - struct ipmi_recv_msg *recv_msg; + struct ipmi_recv_msg *recv_msg = NULL; /* * We expect the OEM SW to perform error checking @@ -4299,9 +4309,8 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, rcvr = find_cmd_rcvr(intf, netfn, cmd, chan); if (rcvr) { user = rcvr->user; - kref_get(&user->refcount); - } else - user = NULL; + recv_msg = ipmi_alloc_recv_msg(user); + } rcu_read_unlock(); if (user == NULL) { @@ -4314,48 +4323,42 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf, */ rv = 0; - } else { - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { - /* - * We couldn't allocate memory for the - * message, so requeue it for handling - * later. - */ - rv = 1; - kref_put(&user->refcount, free_ipmi_user); - } else { - /* - * OEM Messages are expected to be delivered via - * the system interface to SMS software. We might - * need to visit this again depending on OEM - * requirements - */ - smi_addr = ((struct ipmi_system_interface_addr *) - &recv_msg->addr); - smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; - smi_addr->channel = IPMI_BMC_CHANNEL; - smi_addr->lun = msg->rsp[0] & 3; - - recv_msg->user = user; - recv_msg->user_msg_data = NULL; - recv_msg->recv_type = IPMI_OEM_RECV_TYPE; - recv_msg->msg.netfn = msg->rsp[0] >> 2; - recv_msg->msg.cmd = msg->rsp[1]; - recv_msg->msg.data = recv_msg->msg_data; + } else if (!IS_ERR(recv_msg)) { + /* + * OEM Messages are expected to be delivered via + * the system interface to SMS software. 
We might + * need to visit this again depending on OEM + * requirements + */ + smi_addr = ((struct ipmi_system_interface_addr *) + &recv_msg->addr); + smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; + smi_addr->channel = IPMI_BMC_CHANNEL; + smi_addr->lun = msg->rsp[0] & 3; + + recv_msg->user_msg_data = NULL; + recv_msg->recv_type = IPMI_OEM_RECV_TYPE; + recv_msg->msg.netfn = msg->rsp[0] >> 2; + recv_msg->msg.cmd = msg->rsp[1]; + recv_msg->msg.data = recv_msg->msg_data; - /* - * The message starts at byte 4 which follows the - * Channel Byte in the "GET MESSAGE" command - */ - recv_msg->msg.data_len = msg->rsp_size - 4; - memcpy(recv_msg->msg_data, &msg->rsp[4], - msg->rsp_size - 4); - if (deliver_response(intf, recv_msg)) - ipmi_inc_stat(intf, unhandled_commands); - else - ipmi_inc_stat(intf, handled_commands); - } + /* + * The message starts at byte 4 which follows the + * Channel Byte in the "GET MESSAGE" command + */ + recv_msg->msg.data_len = msg->rsp_size - 4; + memcpy(recv_msg->msg_data, &msg->rsp[4], + msg->rsp_size - 4); + if (deliver_response(intf, recv_msg)) + ipmi_inc_stat(intf, unhandled_commands); + else + ipmi_inc_stat(intf, handled_commands); + } else { + /* + * We couldn't allocate memory for the message, so + * requeue it for handling later. + */ + rv = 1; } return rv; @@ -4413,8 +4416,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, if (!user->gets_events) continue; - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { + recv_msg = ipmi_alloc_recv_msg(user); + if (IS_ERR(recv_msg)) { mutex_unlock(&intf->users_mutex); list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) { @@ -4435,8 +4438,6 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, deliver_count++; copy_event_into_recv_msg(recv_msg, msg); - recv_msg->user = user; - kref_get(&user->refcount); list_add_tail(&recv_msg->link, &msgs); } mutex_unlock(&intf->users_mutex); @@ -4452,8 +4453,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf, * No one to receive the message, put it in queue if there's * not already too many things in the queue. */ - recv_msg = ipmi_alloc_recv_msg(); - if (!recv_msg) { + recv_msg = ipmi_alloc_recv_msg(NULL); + if (IS_ERR(recv_msg)) { /* * We couldn't allocate memory for the * message, so requeue it for handling @@ -4488,7 +4489,7 @@ static int handle_bmc_rsp(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg; struct ipmi_system_interface_addr *smi_addr; - recv_msg = msg->user_data; + recv_msg = msg->recv_msg; if (recv_msg == NULL) { dev_warn(intf->si_dev, "IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n"); @@ -4529,9 +4530,10 @@ static int handle_one_recv_msg(struct ipmi_smi *intf, if (msg->rsp_size < 2) { /* Message is too small to be correct. */ - dev_warn(intf->si_dev, - "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n", - (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size); + dev_warn_ratelimited(intf->si_dev, + "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n", + (msg->data[0] >> 2) | 1, + msg->data[1], msg->rsp_size); return_unspecified: /* Generate an error response for the message. 
*/ @@ -4561,14 +4563,14 @@ return_unspecified: } else if ((msg->data_size >= 2) && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) && (msg->data[1] == IPMI_SEND_MSG_CMD) - && (msg->user_data == NULL)) { + && (msg->recv_msg == NULL)) { if (intf->in_shutdown || intf->run_to_completion) goto out; /* * This is the local response to a command send, start - * the timer for these. The user_data will not be + * the timer for these. The recv_msg will not be * NULL if this is a response send, and we will let * response sends just go through. */ @@ -4628,7 +4630,7 @@ return_unspecified: requeue = handle_ipmb_direct_rcv_rsp(intf, msg); } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) && (msg->rsp[1] == IPMI_SEND_MSG_CMD) - && (msg->user_data != NULL)) { + && (msg->recv_msg != NULL)) { /* * It's a response to a response we sent. For this we * deliver a send message response to the user. @@ -4645,7 +4647,7 @@ return_unspecified: cc = msg->rsp[2]; process_response_response: - recv_msg = msg->user_data; + recv_msg = msg->recv_msg; requeue = 0; if (!recv_msg) @@ -4801,6 +4803,7 @@ static void smi_work(struct work_struct *t) int run_to_completion = READ_ONCE(intf->run_to_completion); struct ipmi_smi_msg *newmsg = NULL; struct ipmi_recv_msg *msg, *msg2; + int cc; /* * Start the next message if available. @@ -4809,7 +4812,7 @@ static void smi_work(struct work_struct *t) * because the lower layer is allowed to hold locks while calling * message delivery. */ - +restart: if (!run_to_completion) spin_lock_irqsave(&intf->xmit_msgs_lock, flags); if (intf->curr_msg == NULL && !intf->in_shutdown) { @@ -4830,8 +4833,17 @@ static void smi_work(struct work_struct *t) if (!run_to_completion) spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); - if (newmsg) - intf->handlers->sender(intf->send_info, newmsg); + if (newmsg) { + cc = intf->handlers->sender(intf->send_info, newmsg); + if (cc) { + if (newmsg->recv_msg) + deliver_err_response(intf, + newmsg->recv_msg, cc); + else + ipmi_free_smi_msg(newmsg); + goto restart; + } + } handle_new_recv_msgs(intf); @@ -4868,12 +4880,10 @@ static void smi_work(struct work_struct *t) list_del(&msg->link); - if (refcount_read(&user->destroyed) == 0) { + if (refcount_read(&user->destroyed) == 0) ipmi_free_recv_msg(msg); - } else { - atomic_dec(&user->nr_msgs); + else user->handler->ipmi_recv_hndl(msg, user->handler_data); - } } mutex_unlock(&intf->user_msgs_mutex); @@ -4951,8 +4961,7 @@ smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg, static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, struct list_head *timeouts, unsigned long timeout_period, - int slot, unsigned long *flags, - bool *need_timer) + int slot, bool *need_timer) { struct ipmi_recv_msg *msg; @@ -5004,7 +5013,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, return; } - spin_unlock_irqrestore(&intf->seq_lock, *flags); + mutex_unlock(&intf->seq_lock); /* * Send the new message. We send with a zero @@ -5025,7 +5034,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent, } else ipmi_free_smi_msg(smi_msg); - spin_lock_irqsave(&intf->seq_lock, *flags); + mutex_lock(&intf->seq_lock); } } @@ -5052,7 +5061,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf, * list. 
*/ INIT_LIST_HEAD(&timeouts); - spin_lock_irqsave(&intf->seq_lock, flags); + mutex_lock(&intf->seq_lock); if (intf->ipmb_maintenance_mode_timeout) { if (intf->ipmb_maintenance_mode_timeout <= timeout_period) intf->ipmb_maintenance_mode_timeout = 0; @@ -5062,8 +5071,8 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf, for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) check_msg_timeout(intf, &intf->seq_table[i], &timeouts, timeout_period, i, - &flags, &need_timer); - spin_unlock_irqrestore(&intf->seq_lock, flags); + &need_timer); + mutex_unlock(&intf->seq_lock); list_for_each_entry_safe(msg, msg2, &timeouts, link) deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE); @@ -5083,7 +5092,9 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf, -= timeout_period; if (!intf->maintenance_mode && (intf->auto_maintenance_timeout <= 0)) { - intf->maintenance_mode_enable = false; + intf->maintenance_mode_state = + IPMI_MAINTENANCE_MODE_STATE_OFF; + intf->auto_maintenance_timeout = 0; maintenance_mode_update(intf); } } @@ -5099,15 +5110,13 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf, static void ipmi_request_event(struct ipmi_smi *intf) { /* No event requests when in maintenance mode. */ - if (intf->maintenance_mode_enable) + if (intf->maintenance_mode_state) return; if (!intf->in_shutdown) intf->handlers->request_events(intf->send_info); } -static struct timer_list ipmi_timer; - static atomic_t stop_operation; static void ipmi_timeout_work(struct work_struct *work) @@ -5131,6 +5140,8 @@ static void ipmi_timeout_work(struct work_struct *work) } need_timer = true; } + if (intf->maintenance_mode_state) + need_timer = true; need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME); } @@ -5174,7 +5185,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void) rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC); if (rv) { rv->done = free_smi_msg; - rv->user_data = NULL; + rv->recv_msg = NULL; rv->type = IPMI_SMI_MSG_TYPE_NORMAL; atomic_inc(&smi_msg_inuse_count); } @@ -5190,27 +5201,51 @@ static void free_recv_msg(struct ipmi_recv_msg *msg) kfree(msg); } -static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) +static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user) { struct ipmi_recv_msg *rv; + if (user) { + if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) { + atomic_dec(&user->nr_msgs); + return ERR_PTR(-EBUSY); + } + } + rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC); - if (rv) { - rv->user = NULL; - rv->done = free_recv_msg; - atomic_inc(&recv_msg_inuse_count); + if (!rv) { + if (user) + atomic_dec(&user->nr_msgs); + return ERR_PTR(-ENOMEM); } + + rv->user = user; + rv->done = free_recv_msg; + if (user) + kref_get(&user->refcount); + atomic_inc(&recv_msg_inuse_count); return rv; } void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) { - if (msg->user && !oops_in_progress) + if (msg->user && !oops_in_progress) { + atomic_dec(&msg->user->nr_msgs); kref_put(&msg->user->refcount, free_ipmi_user); + } msg->done(msg); } EXPORT_SYMBOL(ipmi_free_recv_msg); +static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg, + struct ipmi_user *user) +{ + WARN_ON_ONCE(msg->user); /* User should not be set. 
*/ + msg->user = user; + atomic_inc(&user->nr_msgs); + kref_get(&user->refcount); +} + static atomic_t panic_done_count = ATOMIC_INIT(0); static void dummy_smi_done_handler(struct ipmi_smi_msg *msg) diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c index 4a2efafcd1f8..52a1130defe5 100644 --- a/drivers/char/ipmi/ipmi_powernv.c +++ b/drivers/char/ipmi/ipmi_powernv.c @@ -51,7 +51,7 @@ static void send_error_reply(struct ipmi_smi_powernv *smi, ipmi_smi_msg_received(smi->intf, msg); } -static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg) +static int ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg) { struct ipmi_smi_powernv *smi = send_info; struct opal_ipmi_msg *opal_msg; @@ -93,18 +93,19 @@ static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg) smi->interface_id, opal_msg, size); rc = opal_ipmi_send(smi->interface_id, opal_msg, size); pr_devel("%s: -> %d\n", __func__, rc); - - if (!rc) { - smi->cur_msg = msg; - spin_unlock_irqrestore(&smi->msg_lock, flags); - return; + if (rc) { + comp = IPMI_ERR_UNSPECIFIED; + goto err_unlock; } - comp = IPMI_ERR_UNSPECIFIED; + smi->cur_msg = msg; + spin_unlock_irqrestore(&smi->msg_lock, flags); + return IPMI_CC_NO_ERROR; + err_unlock: spin_unlock_irqrestore(&smi->msg_lock, flags); err: - send_error_reply(smi, msg, comp); + return comp; } static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi) diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h index 508c3fd45877..687835b53da5 100644 --- a/drivers/char/ipmi/ipmi_si.h +++ b/drivers/char/ipmi/ipmi_si.h @@ -101,6 +101,13 @@ void ipmi_si_pci_shutdown(void); static inline void ipmi_si_pci_init(void) { } static inline void ipmi_si_pci_shutdown(void) { } #endif +#ifdef CONFIG_IPMI_LS2K +void ipmi_si_ls2k_init(void); +void ipmi_si_ls2k_shutdown(void); +#else +static inline void ipmi_si_ls2k_init(void) { } +static inline void ipmi_si_ls2k_shutdown(void) { } +#endif #ifdef CONFIG_PARISC void ipmi_si_parisc_init(void); void ipmi_si_parisc_shutdown(void); diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 8b5524069c15..70e55f5ff85e 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -53,6 +53,7 @@ #define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY) #define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a short timeout */ +#define SI_TIMEOUT_HOSED (HZ) /* 1 second when in hosed state. */ enum si_intf_state { SI_NORMAL, @@ -61,7 +62,8 @@ enum si_intf_state { SI_CLEARING_FLAGS, SI_GETTING_MESSAGES, SI_CHECKING_ENABLES, - SI_SETTING_ENABLES + SI_SETTING_ENABLES, + SI_HOSED /* FIXME - add watchdog stuff. 
*/ }; @@ -313,7 +315,7 @@ static void return_hosed_msg(struct smi_info *smi_info, int cCode) static enum si_sm_result start_next_msg(struct smi_info *smi_info) { - int rv; + int rv; if (!smi_info->waiting_msg) { smi_info->curr_msg = NULL; @@ -390,6 +392,17 @@ static void start_clear_flags(struct smi_info *smi_info) smi_info->si_state = SI_CLEARING_FLAGS; } +static void start_get_flags(struct smi_info *smi_info) +{ + unsigned char msg[2]; + + msg[0] = (IPMI_NETFN_APP_REQUEST << 2); + msg[1] = IPMI_GET_MSG_FLAGS_CMD; + + start_new_msg(smi_info, msg, 2); + smi_info->si_state = SI_GETTING_FLAGS; +} + static void start_getting_msg_queue(struct smi_info *smi_info) { smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); @@ -742,6 +755,8 @@ static void handle_transaction_done(struct smi_info *smi_info) } break; } + case SI_HOSED: /* Shouldn't happen. */ + break; } } @@ -756,6 +771,10 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, enum si_sm_result si_sm_result; restart: + if (smi_info->si_state == SI_HOSED) + /* Just in case, hosed state is only left from the timeout. */ + return SI_SM_HOSED; + /* * There used to be a loop here that waited a little while * (around 25us) before giving up. That turned out to be @@ -779,18 +798,20 @@ restart: /* * Do the before return_hosed_msg, because that - * releases the lock. + * releases the lock. We just disable operations for + * a while and retry in hosed state. */ - smi_info->si_state = SI_NORMAL; + smi_info->si_state = SI_HOSED; if (smi_info->curr_msg != NULL) { /* * If we were handling a user message, format * a response to send to the upper layer to * tell it about the error. */ - return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED); + return_hosed_msg(smi_info, IPMI_BUS_ERR); } - goto restart; + smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_HOSED); + goto out; } /* @@ -798,8 +819,6 @@ restart: * this if there is not yet an upper layer to handle anything. */ if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) { - unsigned char msg[2]; - if (smi_info->si_state != SI_NORMAL) { /* * We got an ATTN, but we are doing something else. @@ -817,11 +836,7 @@ restart: * interrupts work with the SMI, that's not really * possible. */ - msg[0] = (IPMI_NETFN_APP_REQUEST << 2); - msg[1] = IPMI_GET_MSG_FLAGS_CMD; - - start_new_msg(smi_info, msg, 2); - smi_info->si_state = SI_GETTING_FLAGS; + start_get_flags(smi_info); goto restart; } } @@ -894,27 +909,29 @@ static void flush_messages(void *send_info) * mode. This means we are single-threaded, no need for locks. */ result = smi_event_handler(smi_info, 0); - while (result != SI_SM_IDLE) { + while (result != SI_SM_IDLE && result != SI_SM_HOSED) { udelay(SI_SHORT_TIMEOUT_USEC); result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC); } } -static void sender(void *send_info, - struct ipmi_smi_msg *msg) +static int sender(void *send_info, struct ipmi_smi_msg *msg) { struct smi_info *smi_info = send_info; unsigned long flags; debug_timestamp(smi_info, "Enqueue"); + if (smi_info->si_state == SI_HOSED) + return IPMI_BUS_ERR; + if (smi_info->run_to_completion) { /* * If we are running to completion, start it. Upper * layer will call flush_messages to clear it out. 
*/ smi_info->waiting_msg = msg; - return; + return IPMI_CC_NO_ERROR; } spin_lock_irqsave(&smi_info->si_lock, flags); @@ -929,6 +946,7 @@ static void sender(void *send_info, smi_info->waiting_msg = msg; check_start_timer_thread(smi_info); spin_unlock_irqrestore(&smi_info->si_lock, flags); + return IPMI_CC_NO_ERROR; } static void set_run_to_completion(void *send_info, bool i_run_to_completion) @@ -1087,6 +1105,10 @@ static void smi_timeout(struct timer_list *t) spin_lock_irqsave(&(smi_info->si_lock), flags); debug_timestamp(smi_info, "Timer"); + if (smi_info->si_state == SI_HOSED) + /* Try something to see if the BMC is now operational. */ + start_get_flags(smi_info); + jiffies_now = jiffies; time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) * SI_USEC_PER_JIFFY); @@ -1096,14 +1118,11 @@ static void smi_timeout(struct timer_list *t) /* Running with interrupts, only do long timeouts. */ timeout = jiffies + SI_TIMEOUT_JIFFIES; smi_inc_stat(smi_info, long_timeouts); - goto do_mod_timer; - } - - /* - * If the state machine asks for a short delay, then shorten - * the timer timeout. - */ - if (smi_result == SI_SM_CALL_WITH_DELAY) { + } else if (smi_result == SI_SM_CALL_WITH_DELAY) { + /* + * If the state machine asks for a short delay, then shorten + * the timer timeout. + */ smi_inc_stat(smi_info, short_timeouts); timeout = jiffies + 1; } else { @@ -1111,7 +1130,6 @@ static void smi_timeout(struct timer_list *t) timeout = jiffies + SI_TIMEOUT_JIFFIES; } -do_mod_timer: if (smi_result != SI_SM_IDLE) smi_mod_timer(smi_info, timeout); else @@ -2120,6 +2138,8 @@ static int __init init_ipmi_si(void) ipmi_si_pci_init(); + ipmi_si_ls2k_init(); + ipmi_si_parisc_init(); mutex_lock(&smi_infos_lock); @@ -2331,6 +2351,8 @@ static void cleanup_ipmi_si(void) ipmi_si_pci_shutdown(); + ipmi_si_ls2k_shutdown(); + ipmi_si_parisc_shutdown(); ipmi_si_platform_shutdown(); diff --git a/drivers/char/ipmi/ipmi_si_ls2k.c b/drivers/char/ipmi/ipmi_si_ls2k.c new file mode 100644 index 000000000000..45442c257efd --- /dev/null +++ b/drivers/char/ipmi/ipmi_si_ls2k.c @@ -0,0 +1,189 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for Loongson-2K BMC IPMI interface + * + * Copyright (C) 2024-2025 Loongson Technology Corporation Limited. 
+ * + * Authors: + * Chong Qiao <qiaochong@loongson.cn> + * Binbin Zhou <zhoubinbin@loongson.cn> + */ + +#include <linux/bitfield.h> +#include <linux/ioport.h> +#include <linux/module.h> +#include <linux/types.h> + +#include "ipmi_si.h" + +#define LS2K_KCS_FIFO_IBFH 0x0 +#define LS2K_KCS_FIFO_IBFT 0x1 +#define LS2K_KCS_FIFO_OBFH 0x2 +#define LS2K_KCS_FIFO_OBFT 0x3 + +/* KCS registers */ +#define LS2K_KCS_REG_STS 0x4 +#define LS2K_KCS_REG_DATA_OUT 0x5 +#define LS2K_KCS_REG_DATA_IN 0x6 +#define LS2K_KCS_REG_CMD 0x8 + +#define LS2K_KCS_CMD_DATA 0xa +#define LS2K_KCS_VERSION 0xb +#define LS2K_KCS_WR_REQ 0xc +#define LS2K_KCS_WR_ACK 0x10 + +#define LS2K_KCS_STS_OBF BIT(0) +#define LS2K_KCS_STS_IBF BIT(1) +#define LS2K_KCS_STS_SMS_ATN BIT(2) +#define LS2K_KCS_STS_CMD BIT(3) + +#define LS2K_KCS_DATA_MASK (LS2K_KCS_STS_OBF | LS2K_KCS_STS_IBF | LS2K_KCS_STS_CMD) + +static bool ls2k_registered; + +static unsigned char ls2k_mem_inb_v0(const struct si_sm_io *io, unsigned int offset) +{ + void __iomem *addr = io->addr; + int reg_offset; + + if (offset & BIT(0)) { + reg_offset = LS2K_KCS_REG_STS; + } else { + writeb(readb(addr + LS2K_KCS_REG_STS) & ~LS2K_KCS_STS_OBF, addr + LS2K_KCS_REG_STS); + reg_offset = LS2K_KCS_REG_DATA_OUT; + } + + return readb(addr + reg_offset); +} + +static unsigned char ls2k_mem_inb_v1(const struct si_sm_io *io, unsigned int offset) +{ + void __iomem *addr = io->addr; + unsigned char inb = 0, cmd; + bool obf, ibf; + + obf = readb(addr + LS2K_KCS_FIFO_OBFH) ^ readb(addr + LS2K_KCS_FIFO_OBFT); + ibf = readb(addr + LS2K_KCS_FIFO_IBFH) ^ readb(addr + LS2K_KCS_FIFO_IBFT); + cmd = readb(addr + LS2K_KCS_CMD_DATA); + + if (offset & BIT(0)) { + inb = readb(addr + LS2K_KCS_REG_STS) & ~LS2K_KCS_DATA_MASK; + inb |= FIELD_PREP(LS2K_KCS_STS_OBF, obf) + | FIELD_PREP(LS2K_KCS_STS_IBF, ibf) + | FIELD_PREP(LS2K_KCS_STS_CMD, cmd); + } else { + inb = readb(addr + LS2K_KCS_REG_DATA_OUT); + writeb(readb(addr + LS2K_KCS_FIFO_OBFH), addr + LS2K_KCS_FIFO_OBFT); + } + + return inb; +} + +static void ls2k_mem_outb_v0(const struct si_sm_io *io, unsigned int offset, + unsigned char val) +{ + void __iomem *addr = io->addr; + unsigned char sts = readb(addr + LS2K_KCS_REG_STS); + int reg_offset; + + if (sts & LS2K_KCS_STS_IBF) + return; + + if (offset & BIT(0)) { + reg_offset = LS2K_KCS_REG_CMD; + sts |= LS2K_KCS_STS_CMD; + } else { + reg_offset = LS2K_KCS_REG_DATA_IN; + sts &= ~LS2K_KCS_STS_CMD; + } + + writew(val, addr + reg_offset); + writeb(sts | LS2K_KCS_STS_IBF, addr + LS2K_KCS_REG_STS); + writel(readl(addr + LS2K_KCS_WR_REQ) + 1, addr + LS2K_KCS_WR_REQ); +} + +static void ls2k_mem_outb_v1(const struct si_sm_io *io, unsigned int offset, + unsigned char val) +{ + void __iomem *addr = io->addr; + unsigned char ibfh, ibft; + int reg_offset; + + ibfh = readb(addr + LS2K_KCS_FIFO_IBFH); + ibft = readb(addr + LS2K_KCS_FIFO_IBFT); + + if (ibfh ^ ibft) + return; + + reg_offset = (offset & BIT(0)) ? LS2K_KCS_REG_CMD : LS2K_KCS_REG_DATA_IN; + writew(val, addr + reg_offset); + + writeb(offset & BIT(0), addr + LS2K_KCS_CMD_DATA); + writeb(!ibft, addr + LS2K_KCS_FIFO_IBFH); + writel(readl(addr + LS2K_KCS_WR_REQ) + 1, addr + LS2K_KCS_WR_REQ); +} + +static void ls2k_mem_cleanup(struct si_sm_io *io) +{ + if (io->addr) + iounmap(io->addr); +} + +static int ipmi_ls2k_mem_setup(struct si_sm_io *io) +{ + unsigned char version; + + io->addr = ioremap(io->addr_data, io->regspacing); + if (!io->addr) + return -EIO; + + version = readb(io->addr + LS2K_KCS_VERSION); + + io->inputb = version ? 
ls2k_mem_inb_v1 : ls2k_mem_inb_v0; + io->outputb = version ? ls2k_mem_outb_v1 : ls2k_mem_outb_v0; + io->io_cleanup = ls2k_mem_cleanup; + + return 0; +} + +static int ipmi_ls2k_probe(struct platform_device *pdev) +{ + struct si_sm_io io; + + memset(&io, 0, sizeof(io)); + + io.si_info = &ipmi_kcs_si_info; + io.io_setup = ipmi_ls2k_mem_setup; + io.addr_data = pdev->resource[0].start; + io.regspacing = resource_size(&pdev->resource[0]); + io.dev = &pdev->dev; + + dev_dbg(&pdev->dev, "addr 0x%lx, spacing %d.\n", io.addr_data, io.regspacing); + + return ipmi_si_add_smi(&io); +} + +static void ipmi_ls2k_remove(struct platform_device *pdev) +{ + ipmi_si_remove_by_dev(&pdev->dev); +} + +struct platform_driver ipmi_ls2k_platform_driver = { + .driver = { + .name = "ls2k-ipmi-si", + }, + .probe = ipmi_ls2k_probe, + .remove = ipmi_ls2k_remove, +}; + +void ipmi_si_ls2k_init(void) +{ + platform_driver_register(&ipmi_ls2k_platform_driver); + ls2k_registered = true; +} + +void ipmi_si_ls2k_shutdown(void) +{ + if (ls2k_registered) + platform_driver_unregister(&ipmi_ls2k_platform_driver); +} diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 1bc42830444d..1b63f7d2fcda 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -1068,8 +1068,7 @@ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags) } } -static void sender(void *send_info, - struct ipmi_smi_msg *msg) +static int sender(void *send_info, struct ipmi_smi_msg *msg) { struct ssif_info *ssif_info = send_info; unsigned long oflags, *flags; @@ -1089,6 +1088,7 @@ static void sender(void *send_info, msg->data[0], msg->data[1], (long long)t.tv_sec, (long)t.tv_nsec / NSEC_PER_USEC); } + return IPMI_CC_NO_ERROR; } static int get_smi_info(void *send_info, struct ipmi_smi_info *data) diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 48839958b0b1..34b815901b20 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -512,11 +512,18 @@ static int mmap_zero(struct file *file, struct vm_area_struct *vma) return 0; } +#ifndef CONFIG_MMU +static unsigned long get_unmapped_area_zero(struct file *file, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + return -ENOSYS; +} +#else static unsigned long get_unmapped_area_zero(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { -#ifdef CONFIG_MMU if (flags & MAP_SHARED) { /* * mmap_zero() will call shmem_zero_setup() to create a file, @@ -527,12 +534,18 @@ static unsigned long get_unmapped_area_zero(struct file *file, return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags); } - /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */ - return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags); + /* + * Otherwise flags & MAP_PRIVATE: with no shmem object beneath it, + * attempt to map aligned to huge page size if possible, otherwise we + * fall back to system page size mappings. 
+ */ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + return thp_get_unmapped_area(file, addr, len, pgoff, flags); #else - return -ENOSYS; + return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags); #endif } +#endif /* CONFIG_MMU */ static ssize_t write_full(struct file *file, const char __user *buf, size_t count, loff_t *ppos) diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index dddd702b2454..ba3924eb13ba 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -189,6 +189,15 @@ config TCG_IBMVTPM will be accessible from within Linux. To compile this driver as a module, choose M here; the module will be called tpm_ibmvtpm. +config TCG_LOONGSON + tristate "Loongson TPM Interface" + depends on MFD_LOONGSON_SE + help + If you want to make Loongson TPM support available, say Yes and + it will be accessible from within Linux. To compile this + driver as a module, choose M here; the module will be called + tpm_loongson. + config TCG_XEN tristate "XEN TPM Interface" depends on TCG_TPM && XEN diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile index 9de1b3ea34a9..5b5cdc0d32e4 100644 --- a/drivers/char/tpm/Makefile +++ b/drivers/char/tpm/Makefile @@ -46,3 +46,4 @@ obj-$(CONFIG_TCG_ARM_CRB_FFA) += tpm_crb_ffa.o obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o obj-$(CONFIG_TCG_SVSM) += tpm_svsm.o +obj-$(CONFIG_TCG_LOONGSON) += tpm_loongson.o diff --git a/drivers/char/tpm/tpm_loongson.c b/drivers/char/tpm/tpm_loongson.c new file mode 100644 index 000000000000..9e50250763d1 --- /dev/null +++ b/drivers/char/tpm/tpm_loongson.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Loongson Technology Corporation Limited. */ + +#include <linux/device.h> +#include <linux/mfd/loongson-se.h> +#include <linux/platform_device.h> +#include <linux/wait.h> + +#include "tpm.h" + +struct tpm_loongson_cmd { + u32 cmd_id; + u32 data_off; + u32 data_len; + u32 pad[5]; +}; + +static int tpm_loongson_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + struct loongson_se_engine *tpm_engine = dev_get_drvdata(&chip->dev); + struct tpm_loongson_cmd *cmd_ret = tpm_engine->command_ret; + + if (cmd_ret->data_len > count) + return -EIO; + + memcpy(buf, tpm_engine->data_buffer, cmd_ret->data_len); + + return cmd_ret->data_len; +} + +static int tpm_loongson_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t count) +{ + struct loongson_se_engine *tpm_engine = dev_get_drvdata(&chip->dev); + struct tpm_loongson_cmd *cmd = tpm_engine->command; + + if (count > tpm_engine->buffer_size) + return -E2BIG; + + cmd->data_len = count; + memcpy(tpm_engine->data_buffer, buf, count); + + return loongson_se_send_engine_cmd(tpm_engine); +} + +static const struct tpm_class_ops tpm_loongson_ops = { + .flags = TPM_OPS_AUTO_STARTUP, + .recv = tpm_loongson_recv, + .send = tpm_loongson_send, +}; + +static int tpm_loongson_probe(struct platform_device *pdev) +{ + struct loongson_se_engine *tpm_engine; + struct device *dev = &pdev->dev; + struct tpm_loongson_cmd *cmd; + struct tpm_chip *chip; + + tpm_engine = loongson_se_init_engine(dev->parent, SE_ENGINE_TPM); + if (!tpm_engine) + return -ENODEV; + cmd = tpm_engine->command; + cmd->cmd_id = SE_CMD_TPM; + cmd->data_off = tpm_engine->buffer_off; + + chip = tpmm_chip_alloc(dev, &tpm_loongson_ops); + if (IS_ERR(chip)) + return PTR_ERR(chip); + chip->flags = TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_IRQ; + dev_set_drvdata(&chip->dev, tpm_engine); + + return tpm_chip_register(chip); +} 
+ +static struct platform_driver tpm_loongson = { + .probe = tpm_loongson_probe, + .driver = { + .name = "tpm_loongson", + }, +}; +module_platform_driver(tpm_loongson); + +MODULE_ALIAS("platform:tpm_loongson"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Loongson TPM driver"); |