[ovs-dev,2/2] ovs: fix wrong quote

Message ID 1a5076327165b3b210d54c89f6b914af99e2d0e9.1625299400.git.wangyunjian@huawei.com
State Accepted
Series: fixes for ovs' comments

Checks

Context                  Check    Description
ovsrobot/apply-robot     warning  apply and check: warning
ovsrobot/github-robot    success  github build: passed

Commit Message

Yunjian Wang July 3, 2021, 8:25 a.m. UTC
From: Yunjian Wang <wangyunjian@huawei.com>

Replace the non-ASCII quotation marks in comments with plain ' and " characters.

Signed-off-by: Yunjian Wang <wangyunjian@huawei.com>
---
 lib/dpif-netdev.c |  4 ++--
 lib/flow.h        | 24 ++++++++++++------------
 ovsdb/raft.c      | 24 ++++++++++++------------
 3 files changed, 26 insertions(+), 26 deletions(-)
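
Not part of the patch, just a reviewer-side sketch: the typographic quotes
removed here are U+2018/U+2019 and U+201C/U+201D, so a small standalone
checker like the one below (check_quotes.c is only an illustrative name, not
something in the tree) can confirm none remain by scanning for their UTF-8
byte sequences:

/* check_quotes.c: reviewer-side sketch, not part of this patch.  Scans the
 * files named on the command line for the UTF-8 encodings of the
 * typographic quotes removed here: U+2018/U+2019 (e2 80 98, e2 80 99) and
 * U+201C/U+201D (e2 80 9c, e2 80 9d). */
#include <stdio.h>
#include <string.h>

int
main(int argc, char *argv[])
{
    for (int i = 1; i < argc; i++) {
        FILE *file = fopen(argv[i], "rb");
        if (!file) {
            perror(argv[i]);
            continue;
        }

        char line[4096];
        int lineno = 0;
        while (fgets(line, sizeof line, file)) {
            lineno++;
            /* All four code points start with the bytes e2 80. */
            for (const char *p = line; (p = strstr(p, "\xe2\x80")); p++) {
                unsigned char third = (unsigned char) p[2];
                if (third == 0x98 || third == 0x99
                    || third == 0x9c || third == 0x9d) {
                    printf("%s:%d: non-ASCII quote found\n", argv[i], lineno);
                    break;
                }
            }
        }
        fclose(file);
    }
    return 0;
}

Running it against the three files in the diffstat should report nothing once
this series is applied.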

Patch

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 026b52d27..26218ad72 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -7513,12 +7513,12 @@  dp_netdev_input__(struct dp_netdev_pmd_thread *pmd,
 
     /* All the flow batches need to be reset before any call to
      * packet_batch_per_flow_execute() as it could potentially trigger
-     * recirculation. When a packet matching flow ‘j’ happens to be
+     * recirculation. When a packet matching flow 'j' happens to be
      * recirculated, the nested call to dp_netdev_input__() could potentially
      * classify the packet as matching another flow - say 'k'. It could happen
      * that in the previous call to dp_netdev_input__() that same flow 'k' had
      * already its own batches[k] still waiting to be served.  So if its
-     * ‘batch’ member is not reset, the recirculated packet would be wrongly
+     * 'batch' member is not reset, the recirculated packet would be wrongly
      * appended to batches[k] of the 1st call to dp_netdev_input__(). */
     for (i = 0; i < n_batches; i++) {
         batches[i].flow->batch = NULL;
diff --git a/lib/flow.h b/lib/flow.h
index b32f0b277..467b2801d 100644
--- a/lib/flow.h
+++ b/lib/flow.h
@@ -595,20 +595,20 @@  struct mf_for_each_in_map_aux {
     size_t unit;             /* Current 64-bit unit of the flowmaps
                                 being processed. */
     struct flowmap fmap;     /* Remaining 1-bits corresponding to the
-                                64-bit words in ‘values’ */
+                                64-bit words in 'values' */
     struct flowmap map;      /* Remaining 1-bits corresponding to the
                                 64-bit words of interest. */
     const uint64_t *values;  /* 64-bit words corresponding to the
-                                1-bits in ‘fmap’. */
+                                1-bits in 'fmap'. */
 };
 
-/* Get the data from ‘aux->values’ corresponding to the next lowest 1-bit
- * in ‘aux->map’, given that ‘aux->values’ points to an array of 64-bit
- * words corresponding to the 1-bits in ‘aux->fmap’, starting from the
+/* Get the data from 'aux->values' corresponding to the next lowest 1-bit
+ * in 'aux->map', given that 'aux->values' points to an array of 64-bit
+ * words corresponding to the 1-bits in 'aux->fmap', starting from the
  * rightmost 1-bit.
  *
- * Returns ’true’ if the traversal is incomplete, ‘false’ otherwise.
- * ‘aux’ is prepared for the next iteration after each call.
+ * Returns 'true' if the traversal is incomplete, 'false' otherwise.
+ * 'aux' is prepared for the next iteration after each call.
  *
  * This is used to traverse through, for example, the values in a miniflow
  * representation of a flow key selected by non-zero 64-bit words in a
@@ -634,12 +634,12 @@  mf_get_next_in_map(struct mf_for_each_in_map_aux *aux,
     *map -= rm1bit;
     fmap = &aux->fmap.bits[aux->unit];
 
-    /* If the rightmost 1-bit found from the current unit in ‘aux->map’
-     * (‘rm1bit’) is also present in ‘aux->fmap’, store the corresponding
-     * value from ‘aux->values’ to ‘*value', otherwise store 0. */
+    /* If the rightmost 1-bit found from the current unit in 'aux->map'
+     * ('rm1bit') is also present in 'aux->fmap', store the corresponding
+     * value from 'aux->values' to '*value', otherwise store 0. */
     if (OVS_LIKELY(*fmap & rm1bit)) {
-        /* Skip all 64-bit words in ‘values’ preceding the one corresponding
-         * to ‘rm1bit’. */
+        /* Skip all 64-bit words in 'values' preceding the one corresponding
+         * to 'rm1bit'. */
         map_t trash = *fmap & (rm1bit - 1);
 
         /* Avoid resetting 'fmap' and calling count_1bits() when trash is
diff --git a/ovsdb/raft.c b/ovsdb/raft.c
index 5bb901fd4..2fb515651 100644
--- a/ovsdb/raft.c
+++ b/ovsdb/raft.c
@@ -1981,7 +1981,7 @@  raft_run(struct raft *raft)
              * follower.
              *
              * Raft paper section 6.2: Leaders: A server might be in the leader
-             * state, but if it isn’t the current leader, it could be
+             * state, but if it isn't the current leader, it could be
              * needlessly delaying client requests. For example, suppose a
              * leader is partitioned from the rest of the cluster, but it can
              * still communicate with a particular client. Without additional
@@ -1989,7 +1989,7 @@  raft_run(struct raft *raft)
              * being unable to replicate a log entry to any other servers.
              * Meanwhile, there might be another leader of a newer term that is
              * able to communicate with a majority of the cluster and would be
-             * able to commit the client’s request. Thus, a leader in Raft
+             * able to commit the client's request. Thus, a leader in Raft
              * steps down if an election timeout elapses without a successful
              * round of heartbeats to a majority of its cluster; this allows
              * clients to retry their requests with another server.  */
@@ -2733,8 +2733,8 @@  raft_become_leader(struct raft *raft)
      *     which those are.  To find out, it needs to commit an entry from its
      *     term.  Raft handles this by having each leader commit a blank no-op
      *     entry into the log at the start of its term.  As soon as this no-op
-     *     entry is committed, the leader’s commit index will be at least as
-     *     large as any other servers’ during its term.
+     *     entry is committed, the leader's commit index will be at least as
+     *     large as any other servers' during its term.
      */
     raft_command_unref(raft_command_execute__(raft, NULL, NULL, 0, NULL,
                                               NULL));
@@ -2750,7 +2750,7 @@  raft_receive_term__(struct raft *raft, const struct raft_rpc_common *common,
     /* Section 3.3 says:
      *
      *     Current terms are exchanged whenever servers communicate; if one
-     *     server’s current term is smaller than the other’s, then it updates
+     *     server's current term is smaller than the other's, then it updates
      *     its current term to the larger value.  If a candidate or leader
      *     discovers that its term is out of date, it immediately reverts to
      *     follower state.  If a server receives a request with a stale term
@@ -3130,8 +3130,8 @@  raft_update_leader(struct raft *raft, const struct uuid *sid)
     if (raft->role == RAFT_CANDIDATE) {
         /* Section 3.4: While waiting for votes, a candidate may
          * receive an AppendEntries RPC from another server claiming to
-         * be leader. If the leader’s term (included in its RPC) is at
-         * least as large as the candidate’s current term, then the
+         * be leader. If the leader's term (included in its RPC) is at
+         * least as large as the candidate's current term, then the
          * candidate recognizes the leader as legitimate and returns to
          * follower state. */
         raft->role = RAFT_FOLLOWER;
@@ -3145,7 +3145,7 @@  raft_handle_append_request(struct raft *raft,
 {
     /* We do not check whether the server that sent the request is part of the
      * cluster.  As section 4.1 says, "A server accepts AppendEntries requests
-     * from a leader that is not part of the server’s latest configuration.
+     * from a leader that is not part of the server's latest configuration.
      * Otherwise, a new server could never be added to the cluster (it would
      * never accept any log entries preceding the configuration entry that adds
      * the server)." */
@@ -3492,7 +3492,7 @@  raft_handle_append_reply(struct raft *raft,
          * more quickly, including those described in Chapter 3. The simplest
          * approach to solving this particular problem of adding a new server,
          * however, is to have followers return the length of their logs in the
-         * AppendEntries response; this allows the leader to cap the follower’s
+         * AppendEntries response; this allows the leader to cap the follower's
          * nextIndex accordingly." */
         s->next_index = (s->next_index > 0
                          ? MIN(s->next_index - 1, rpy->log_end)
@@ -3557,8 +3557,8 @@  raft_should_suppress_disruptive_server(struct raft *raft,
      *    election without waiting an election timeout.  In that case,
      *    RequestVote messages should be processed by other servers even when
      *    they believe a current cluster leader exists.  Those RequestVote
-     *    requests can include a special flag to indicate this behavior (“I
-     *    have permission to disrupt the leader--it told me to!”).
+     *    requests can include a special flag to indicate this behavior ("I
+     *    have permission to disrupt the leader--it told me to!").
      *
      * This clearly describes how the followers should act, but not the leader.
      * We just ignore vote requests that arrive at a current leader.  This
@@ -3613,7 +3613,7 @@  raft_handle_vote_request__(struct raft *raft,
     }
 
     /* Section 3.6.1: "The RequestVote RPC implements this restriction: the RPC
-     * includes information about the candidate’s log, and the voter denies its
+     * includes information about the candidate's log, and the voter denies its
      * vote if its own log is more up-to-date than that of the candidate.  Raft
      * determines which of two logs is more up-to-date by comparing the index
      * and term of the last entries in the logs.  If the logs have last entries