ewlu / gcc-precommit-ci


Patch Status 29435-ReRePATCH_v5_RISCV_Handle_differences_between_XTheadvector_and_Vector-1 #1132

Closed: github-actions[bot] closed this issue 2 months ago

github-actions[bot] commented 8 months ago

Precommit CI Run information

Logs can be found in the associated Github Actions run: https://github.com/ewlu/gcc-precommit-ci/actions/runs/7471744224

Patch information

Applied patches: 1 -> 1
Associated series: https://patchwork.sourceware.org/project/gcc/list/?series=29435
Last patch applied: https://patchwork.sourceware.org/project/gcc/patch/cd3b62c3-1f15-4f8b-af72-eeb5b1f0376c.cooper.joshua@linux.alibaba.com/
Patch id: 83727

Build Targets

Some targets are built as multilibs. If a build target's name ends in "multilib", refer to the table below to see all the targets within that multilib.

Target name                     -march string(s)
newlib-rv64gc-lp64d-multilib    rv32gc-ilp32d, rv64gc-lp64d
newlib-rv64gcv-lp64d-multilib   rv64gcv-lp64d
linux-rv64gcv-lp64d-multilib    rv32gcv-ilp32d, rv64gcv-lp64d

Target Information

Target Shorthand   -march string
Bitmanip           gc_zba_zbb_zbc_zbs

Notes

Testsuite results use a more lenient allowlist to reduce error reporting from flaky tests. Please take a look at the current allowlist. Results come from a sum-file comparator. Each patch is applied to a well-known, non-broken baseline taken from our gcc postcommit framework (here), which runs the full gcc testsuite every 6 hours. If you have any questions or encounter any issues that look like false positives, please contact us at patchworks-ci@rivosinc.com
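
As a rough sketch of what a sum-file comparison involves (this is not the CI's actual comparator, and the file names are placeholders), the regression-relevant DejaGnu result lines of a baseline run and a patched run can be diffed with standard shell tools:

  # Compare regression-relevant lines between two DejaGnu .sum files.
  # baseline.sum and patched.sum are assumed, illustrative file names.
  diff <(grep -E '^(FAIL|XPASS|ERROR): ' baseline.sum | sort) \
       <(grep -E '^(FAIL|XPASS|ERROR): ' patched.sum | sort)

Lines present only in the second file indicate new failures introduced by the patch; lines present only in the first indicate tests the patch fixed.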

github-actions[bot] commented 8 months ago

Lint Status

The following issues have been found with 29435-ReRePATCH_v5_RISCV_Handle_differences_between_XTheadvector_and_Vector-1 using gcc's ./contrib/check_GNU_style.py. Please use your best judgement when resolving them. These are only warnings and do not need to be resolved in order to merge your patch. If any of these warnings look like false positives that could be guarded against, please contact me: patchworks-ci@rivosinc.com.

Traceback (most recent call last):
  File "./gcc/contrib/check_GNU_style.py", line 45, in <module>
    main()
  File "./gcc/contrib/check_GNU_style.py", line 43, in main
    check_GNU_style_file(diff_file, format)
  File "/home/runner/work/gcc-precommit-ci/gcc-precommit-ci/riscv-gnu-toolchain/gcc/contrib/check_GNU_style_lib.py", line 279, in check_GNU_style_file
    patch = PatchSet(file)
  File "/home/runner/.local/lib/python3.8/site-packages/unidiff/patch.py", line 462, in __init__
    self._parse(data, encoding=encoding, metadata_only=metadata_only)
  File "/home/runner/.local/lib/python3.8/site-packages/unidiff/patch.py", line 552, in _parse
    current_file._parse_hunk(line, diff, encoding, metadata_only)
  File "/home/runner/.local/lib/python3.8/site-packages/unidiff/patch.py", line 318, in _parse_hunk
    raise UnidiffParseError(
unidiff.errors.UnidiffParseError: Hunk diff line expected:    [(match_operand      0 "register_operand")
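
The traceback means the style check never actually ran: unidiff rejected the series at a hunk body line, which matches the corrupt-patch failure git am reports in the next comment (the patch text appears to have been damaged, likely line-wrapped, in transit). A minimal local reproduction, assuming the series has been saved as patch.diff:

  # Run the same checker the CI invokes, on a locally saved copy of the series.
  # patch.diff is an assumed file name.
  python3 ./gcc/contrib/check_GNU_style.py patch.diff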


github-actions[bot] commented 8 months ago

Apply Status

Target                                                                                                Status
Baseline hash:    https://github.com/gcc-mirror/gcc/commit/9f7afa99c67f039e43019ebd08d14a7f01e2d89c   Failed
Tip of tree hash: https://github.com/gcc-mirror/gcc/commit/0141ee79d72a3bccf040e02cd047fe585b96fa8d   Failed

Command

> git am ../patches/*.patch --whitespace=fix -q --3way --empty=drop

Output

error: corrupt patch at line 41
error: could not build fake ancestor
hint: Use 'git am --show-current-patch=diff' to see the failed patch
Patch failed at 0001 Re:Re:[PATCH v5] RISC-V: Handle differences between XTheadvector and Vector
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".
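
Since the series appears to have been mangled in transit rather than genuinely conflicting, re-fetching it is usually easier than hand-repairing the corrupt hunk. A hedged recovery sketch (it assumes patchwork's convention of serving the raw mbox at the patch URL plus mbox/; the URL is the one listed under "Patch information" above):

  # Inspect exactly what git am tried to apply (per the hint above).
  git am --show-current-patch=diff

  # Abort the failed apply, then retry from the raw mbox.
  git am --abort
  curl -L 'https://patchwork.sourceware.org/project/gcc/patch/cd3b62c3-1f15-4f8b-af72-eeb5b1f0376c.cooper.joshua@linux.alibaba.com/mbox/' \
    | git am --3way --whitespace=fix

The patch as received by the CI follows.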
---
 gcc/config.gcc                                |   2 +-
 gcc/config/riscv/autovec.md                   |   2 +-
 gcc/config/riscv/predicates.md                |   4 +-
 gcc/config/riscv/riscv-c.cc                   |   3 +-
 gcc/config/riscv/riscv-string.cc              |   3 +-
 gcc/config/riscv/riscv-v.cc                   |   2 +-
 .../riscv/riscv-vector-builtins-bases.cc      |  48 ++++--
 .../riscv/riscv-vector-builtins-shapes.cc     |  23 +++
 gcc/config/riscv/riscv-vector-switch.def      | 150 +++++++++---------
 gcc/config/riscv/riscv.cc                     |  20 ++-
 gcc/config/riscv/riscv_th_vector.h            |  49 ++++++
 gcc/config/riscv/thead-vector.md              | 102 ++++++++++++
 gcc/config/riscv/thead.cc                     |  23 ++-
 gcc/config/riscv/vector.md                    |  49 ++++--
 .../gcc.target/riscv/rvv/base/abi-1.c         |   2 +-
 .../gcc.target/riscv/rvv/base/pragma-1.c      |   2 +-
 gcc/testsuite/lib/target-supports.exp         |  12 ++
 17 files changed, 383 insertions(+), 113 deletions(-)
 create mode 100644 gcc/config/riscv/riscv_th_vector.h
 create mode 100644 gcc/config/riscv/thead-vector.md

diff --git a/gcc/config.gcc b/gcc/config.gcc
index 7e583390024..047e4c02cf4 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -549,7 +549,7 @@ riscv*)
    extra_objs="${extra_objs} riscv-vector-builtins.o riscv-vector-builtins-shapes.o riscv-vector-builtins-bases.o"
    extra_objs="${extra_objs} thead.o riscv-target-attr.o"
    d_target_objs="riscv-d.o"
-   extra_headers="riscv_vector.h"
+   extra_headers="riscv_vector.h riscv_th_vector.h"
    target_gtfiles="$target_gtfiles \$(srcdir)/config/riscv/riscv-vector-builtins.cc"
    target_gtfiles="$target_gtfiles \$(srcdir)/config/riscv/riscv-vector-builtins.h"
    ;;
diff --git a/gcc/config/riscv/autovec.md b/gcc/config/riscv/autovec.md
index 775eaa825b0..0477781cabe 100644
--- a/gcc/config/riscv/autovec.md
+++ b/gcc/config/riscv/autovec.md
@@ -2579,7 +2579,7 @@
   [(match_operand      0 "register_operand")
    (match_operand      1 "memory_operand")
    (match_operand:ANYI 2 "const_int_operand")]
-  "TARGET_VECTOR"
+  "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
   {
     riscv_vector::expand_rawmemchr(<MODE>mode, operands[0], operands[1],
                   operands[2]);
diff --git a/gcc/config/riscv/predicates.md b/gcc/config/riscv/predicates.md
index b1a79cae50a..0337da88284 100644
--- a/gcc/config/riscv/predicates.md
+++ b/gcc/config/riscv/predicates.md
@@ -428,7 +428,9 @@
 ;; Predicates for the V extension.
 (define_special_predicate "vector_length_operand"
   (ior (match_operand 0 "pmode_register_operand")
-       (match_operand 0 "const_csr_operand")))
+       (and (ior (match_test "TARGET_XTHEADVECTOR && rtx_equal_p (op, const0_rtx)")
+        (match_test "!TARGET_XTHEADVECTOR"))
+    (match_operand 0 "const_csr_operand"))))

 (define_special_predicate "autovec_length_operand"
   (ior (match_operand 0 "pmode_register_operand")
diff --git a/gcc/config/riscv/riscv-c.cc b/gcc/config/riscv/riscv-c.cc
index 422ddc2c308..2e306057347 100644
--- a/gcc/config/riscv/riscv-c.cc
+++ b/gcc/config/riscv/riscv-c.cc
@@ -195,7 +195,8 @@ riscv_pragma_intrinsic (cpp_reader *)

   const char *name = TREE_STRING_POINTER (x);

-  if (strcmp (name, "vector") == 0)
+  if (strcmp (name, "vector") == 0
+      || strcmp (name, "xtheadvector") == 0)
     {
       if (!TARGET_VECTOR)
    {
diff --git a/gcc/config/riscv/riscv-string.cc b/gcc/config/riscv/riscv-string.cc
index f227b8667ce..b09b51d7526 100644
--- a/gcc/config/riscv/riscv-string.cc
+++ b/gcc/config/riscv/riscv-string.cc
@@ -773,7 +773,8 @@ riscv_expand_block_move_scalar (rtx dest, rtx src, rtx length)
 bool
 riscv_expand_block_move (rtx dest, rtx src, rtx length)
 {
-  if (TARGET_VECTOR && stringop_strategy & STRATEGY_VECTOR)
+  if ((TARGET_VECTOR && !TARGET_XTHEADVECTOR)
+      && stringop_strategy & STRATEGY_VECTOR)
     {
       bool ok = riscv_vector::expand_block_move (dest, src, length);
       if (ok)
diff --git a/gcc/config/riscv/riscv-v.cc b/gcc/config/riscv/riscv-v.cc
index 2491522191a..7e25deb966d 100644
--- a/gcc/config/riscv/riscv-v.cc
+++ b/gcc/config/riscv/riscv-v.cc
@@ -4392,7 +4392,7 @@ cmp_lmul_gt_one (machine_mode mode)
 bool
 vls_mode_valid_p (machine_mode vls_mode)
 {
-  if (!TARGET_VECTOR)
+  if (!TARGET_VECTOR || TARGET_XTHEADVECTOR)
     return false;

   if (riscv_autovec_preference == RVV_SCALABLE)
diff --git a/gcc/config/riscv/riscv-vector-builtins-bases.cc b/gcc/config/riscv/riscv-vector-builtins-bases.cc
index 91af2c00674..46f1a1da33e 100644
--- a/gcc/config/riscv/riscv-vector-builtins-bases.cc
+++ b/gcc/config/riscv/riscv-vector-builtins-bases.cc
@@ -115,23 +115,37 @@ public:

     tree type = builtin_types[e.type.index].vector;
     machine_mode mode = TYPE_MODE (type);
-    /* Normalize same RATO (SEW/LMUL) into same vsetvl instruction.
-
-    - e8,mf8/e16,mf4/e32,mf2/e64,m1 --> e8mf8
-    - e8,mf4/e16,mf2/e32,m1/e64,m2  --> e8mf4
-    - e8,mf2/e16,m1/e32,m2/e64,m4   --> e8mf2
-    - e8,m1/e16,m2/e32,m4/e64,m8    --> e8m1
-    - e8,m2/e16,m4/e32,m8           --> e8m2
-    - e8,m4/e16,m8                  --> e8m4
-    - e8,m8                         --> e8m8
-    */
-    /* SEW.  */
-    e.add_input_operand (Pmode, gen_int_mode (8, Pmode));
-
-    /* LMUL.  */
-    machine_mode e8_mode
-      = get_vector_mode (QImode, GET_MODE_NUNITS (mode)).require ();
-    e.add_input_operand (Pmode, gen_int_mode (get_vlmul (e8_mode), Pmode));
+
+    if (TARGET_XTHEADVECTOR)
+      {
+   machine_mode inner_mode = GET_MODE_INNER (mode);
+   /* SEW.  */
+   e.add_input_operand (Pmode,
+     gen_int_mode (GET_MODE_BITSIZE (inner_mode), Pmode));
+   /* LMUL.  */
+   e.add_input_operand (Pmode,
+     gen_int_mode (get_vlmul (mode), Pmode));
+      }
+    else
+      {
+   /* Normalize same RATO (SEW/LMUL) into same vsetvl instruction.
+
+        - e8,mf8/e16,mf4/e32,mf2/e64,m1 --> e8mf8
+        - e8,mf4/e16,mf2/e32,m1/e64,m2  --> e8mf4
+        - e8,mf2/e16,m1/e32,m2/e64,m4   --> e8mf2
+        - e8,m1/e16,m2/e32,m4/e64,m8    --> e8m1
+        - e8,m2/e16,m4/e32,m8           --> e8m2
+        - e8,m4/e16,m8                  --> e8m4
+        - e8,m8                         --> e8m8
+   */
+   /* SEW.  */
+   e.add_input_operand (Pmode, gen_int_mode (8, Pmode));
+
+   /* LMUL.  */
+   machine_mode e8_mode
+     = get_vector_mode (QImode, GET_MODE_NUNITS (mode)).require ();
+   e.add_input_operand (Pmode, gen_int_mode (get_vlmul (e8_mode), Pmode));
+      }

     /* TAIL_ANY.  */
     e.add_input_operand (Pmode,
diff --git a/gcc/config/riscv/riscv-vector-builtins-shapes.cc b/gcc/config/riscv/riscv-vector-builtins-shapes.cc
index ee8058dcdb1..1e4f4d53de6 100644
--- a/gcc/config/riscv/riscv-vector-builtins-shapes.cc
+++ b/gcc/config/riscv/riscv-vector-builtins-shapes.cc
@@ -33,6 +33,25 @@

 namespace riscv_vector {

+/* Check whether the RETURN_TYPE and ARGUMENT_TYPES are
+   valid for the function.  */
+
+static bool
+check_type (tree return_type, vec<tree> &argument_types)
+{
+  tree arg;
+  unsigned i;
+
+  if (!return_type)
+    return false;
+
+  FOR_EACH_VEC_ELT (argument_types, i, arg)
+    if (!arg)
+      return false;
+
+  return true;
+}
+
 /* Add one function instance for GROUP, using operand suffix at index OI,
    mode suffix at index PAIR && bi and predication suffix at index pred_idx.  */
 static void
@@ -49,6 +68,10 @@ build_one (function_builder &b, const function_group_info &group,
     group.ops_infos.types[vec_type_idx].index);
   b.allocate_argument_types (function_instance, argument_types);
   b.apply_predication (function_instance, return_type, argument_types);
+
+  if (TARGET_XTHEADVECTOR && !check_type (return_type, argument_types))
+    return;
+
   b.add_overloaded_function (function_instance, *group.shape);
   b.add_unique_function (function_instance, (*group.shape), return_type,
             argument_types);
diff --git a/gcc/config/riscv/riscv-vector-switch.def b/gcc/config/riscv/riscv-vector-switch.def
index 1ad26c2a3b2..452283b7416 100644
--- a/gcc/config/riscv/riscv-vector-switch.def
+++ b/gcc/config/riscv/riscv-vector-switch.def
@@ -68,9 +68,9 @@ Encode the ratio of SEW/LMUL into the mask types.
 #endif

 /* Disable modes if TARGET_MIN_VLEN == 32.  */
-ENTRY (RVVMF64BI, TARGET_MIN_VLEN > 32, LMUL_F8, 64)
-ENTRY (RVVMF32BI, true, LMUL_F4, 32)
-ENTRY (RVVMF16BI, true, LMUL_F2, 16)
+ENTRY (RVVMF64BI, TARGET_MIN_VLEN > 32, TARGET_XTHEADVECTOR ? LMUL_1 : LMUL_F8, 64)
+ENTRY (RVVMF32BI, true, TARGET_XTHEADVECTOR ? LMUL_1 : LMUL_F4, 32)
+ENTRY (RVVMF16BI, true, TARGET_XTHEADVECTOR ? LMUL_1 : LMUL_F2, 16)
 ENTRY (RVVMF8BI, true, LMUL_1, 8)
 ENTRY (RVVMF4BI, true, LMUL_2, 4)
 ENTRY (RVVMF2BI, true, LMUL_4, 2)
@@ -81,39 +81,39 @@ ENTRY (RVVM8QI, true, LMUL_8, 1)
 ENTRY (RVVM4QI, true, LMUL_4, 2)
 ENTRY (RVVM2QI, true, LMUL_2, 4)
 ENTRY (RVVM1QI, true, LMUL_1, 8)
-ENTRY (RVVMF2QI, true, LMUL_F2, 16)
-ENTRY (RVVMF4QI, true, LMUL_F4, 32)
-ENTRY (RVVMF8QI, TARGET_MIN_VLEN > 32, LMUL_F8, 64)
+ENTRY (RVVMF2QI, !TARGET_XTHEADVECTOR, LMUL_F2, 16)
+ENTRY (RVVMF4QI, !TARGET_XTHEADVECTOR, LMUL_F4, 32)
+ENTRY (RVVMF8QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F8, 64)

 /* Disable modes if TARGET_MIN_VLEN == 32.  */
 ENTRY (RVVM8HI, true, LMUL_8, 2)
 ENTRY (RVVM4HI, true, LMUL_4, 4)
 ENTRY (RVVM2HI, true, LMUL_2, 8)
 ENTRY (RVVM1HI, true, LMUL_1, 16)
-ENTRY (RVVMF2HI, true, LMUL_F2, 32)
-ENTRY (RVVMF4HI, TARGET_MIN_VLEN > 32, LMUL_F4, 64)
+ENTRY (RVVMF2HI, !TARGET_XTHEADVECTOR, LMUL_F2, 32)
+ENTRY (RVVMF4HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F4, 64)

 /* Disable modes if TARGET_MIN_VLEN == 32 or !TARGET_VECTOR_ELEN_FP_16.  */
 ENTRY (RVVM8HF, TARGET_VECTOR_ELEN_FP_16, LMUL_8, 2)
 ENTRY (RVVM4HF, TARGET_VECTOR_ELEN_FP_16, LMUL_4, 4)
 ENTRY (RVVM2HF, TARGET_VECTOR_ELEN_FP_16, LMUL_2, 8)
 ENTRY (RVVM1HF, TARGET_VECTOR_ELEN_FP_16, LMUL_1, 16)
-ENTRY (RVVMF2HF, TARGET_VECTOR_ELEN_FP_16, LMUL_F2, 32)
-ENTRY (RVVMF4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, LMUL_F4, 64)
+ENTRY (RVVMF2HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, LMUL_F2, 32)
+ENTRY (RVVMF4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F4, 64)

 /* Disable modes if TARGET_MIN_VLEN == 32.  */
 ENTRY (RVVM8SI, true, LMUL_8, 4)
 ENTRY (RVVM4SI, true, LMUL_4, 8)
 ENTRY (RVVM2SI, true, LMUL_2, 16)
 ENTRY (RVVM1SI, true, LMUL_1, 32)
-ENTRY (RVVMF2SI, TARGET_MIN_VLEN > 32, LMUL_F2, 64)
+ENTRY (RVVMF2SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F2, 64)

 /* Disable modes if TARGET_MIN_VLEN == 32 or !TARGET_VECTOR_ELEN_FP_32.  */
 ENTRY (RVVM8SF, TARGET_VECTOR_ELEN_FP_32, LMUL_8, 4)
 ENTRY (RVVM4SF, TARGET_VECTOR_ELEN_FP_32, LMUL_4, 8)
 ENTRY (RVVM2SF, TARGET_VECTOR_ELEN_FP_32, LMUL_2, 16)
 ENTRY (RVVM1SF, TARGET_VECTOR_ELEN_FP_32, LMUL_1, 32)
-ENTRY (RVVMF2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, LMUL_F2, 64)
+ENTRY (RVVMF2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, LMUL_F2, 64)

 /* Disable modes if !TARGET_VECTOR_ELEN_64.  */
 ENTRY (RVVM8DI, TARGET_VECTOR_ELEN_64, LMUL_8, 8)
@@ -140,127 +140,127 @@ ENTRY (RVVM1DF, TARGET_VECTOR_ELEN_FP_64, LMUL_1, 64)
 #endif

 TUPLE_ENTRY (RVVM1x8QI, true, RVVM1QI, 8, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x8QI, true, RVVMF2QI, 8, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x8QI, true, RVVMF4QI, 8, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x8QI, TARGET_MIN_VLEN > 32, RVVMF8QI, 8, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x8QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 8, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x8QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 8, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x8QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 8, LMUL_F8, 64)
 TUPLE_ENTRY (RVVM1x7QI, true, RVVM1QI, 7, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x7QI, true, RVVMF2QI, 7, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x7QI, true, RVVMF4QI, 7, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x7QI, TARGET_MIN_VLEN > 32, RVVMF8QI, 7, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x7QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 7, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x7QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 7, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x7QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 7, LMUL_F8, 64)
 TUPLE_ENTRY (RVVM1x6QI, true, RVVM1QI, 6, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x6QI, true, RVVMF2QI, 6, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x6QI, true, RVVMF4QI, 6, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x6QI, TARGET_MIN_VLEN > 32, RVVMF8QI, 6, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x6QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 6, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x6QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 6, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x6QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 6, LMUL_F8, 64)
 TUPLE_ENTRY (RVVM1x5QI, true, RVVM1QI, 5, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x5QI, true, RVVMF2QI, 5, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x5QI, true, RVVMF4QI, 5, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x5QI, TARGET_MIN_VLEN > 32, RVVMF8QI, 5, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x5QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 5, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x5QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 5, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x5QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 5, LMUL_F8, 64)
 TUPLE_ENTRY (RVVM2x4QI, true, RVVM2QI, 4, LMUL_2, 4)
 TUPLE_ENTRY (RVVM1x4QI, true, RVVM1QI, 4, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x4QI, true, RVVMF2QI, 4, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x4QI, true, RVVMF4QI, 4, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x4QI, TARGET_MIN_VLEN > 32, RVVMF8QI, 4, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x4QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 4, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x4QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 4, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x4QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 4, LMUL_F8, 64)
 TUPLE_ENTRY (RVVM2x3QI, true, RVVM2QI, 3, LMUL_2, 4)
 TUPLE_ENTRY (RVVM1x3QI, true, RVVM1QI, 3, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x3QI, true, RVVMF2QI, 3, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x3QI, true, RVVMF4QI, 3, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x3QI, TARGET_MIN_VLEN > 32, RVVMF8QI, 3, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x3QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 3, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x3QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 3, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x3QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 3, LMUL_F8, 64)
 TUPLE_ENTRY (RVVM4x2QI, true, RVVM4QI, 2, LMUL_4, 2)
 TUPLE_ENTRY (RVVM2x2QI, true, RVVM2QI, 2, LMUL_2, 4)
 TUPLE_ENTRY (RVVM1x2QI, true, RVVM1QI, 2, LMUL_1, 8)
-TUPLE_ENTRY (RVVMF2x2QI, true, RVVMF2QI, 2, LMUL_F2, 16)
-TUPLE_ENTRY (RVVMF4x2QI, true, RVVMF4QI, 2, LMUL_F4, 32)
-TUPLE_ENTRY (RVVMF8x2QI, TARGET_MIN_VLEN > 32, RVVMF8QI, 2, LMUL_F8, 64)
+TUPLE_ENTRY (RVVMF2x2QI, !TARGET_XTHEADVECTOR, RVVMF2QI, 2, LMUL_F2, 16)
+TUPLE_ENTRY (RVVMF4x2QI, !TARGET_XTHEADVECTOR, RVVMF4QI, 2, LMUL_F4, 32)
+TUPLE_ENTRY (RVVMF8x2QI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF8QI, 2, LMUL_F8, 64)

 TUPLE_ENTRY (RVVM1x8HI, true, RVVM1HI, 8, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x8HI, true, RVVMF2HI, 8, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x8HI, TARGET_MIN_VLEN > 32, RVVMF4HI, 8, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x8HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 8, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x8HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 8, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM1x7HI, true, RVVM1HI, 7, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x7HI, true, RVVMF2HI, 7, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x7HI, TARGET_MIN_VLEN > 32, RVVMF4HI, 7, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x7HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 7, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x7HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 7, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM1x6HI, true, RVVM1HI, 6, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x6HI, true, RVVMF2HI, 6, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x6HI, TARGET_MIN_VLEN > 32, RVVMF4HI, 6, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x6HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 6, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x6HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 6, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM1x5HI, true, RVVM1HI, 5, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x5HI, true, RVVMF2HI, 5, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x5HI, TARGET_MIN_VLEN > 32, RVVMF4HI, 5, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x5HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 5, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x5HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 5, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM2x4HI, true, RVVM2HI, 4, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x4HI, true, RVVM1HI, 4, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x4HI, true, RVVMF2HI, 4, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x4HI, TARGET_MIN_VLEN > 32, RVVMF4HI, 4, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x4HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 4, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x4HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 4, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM2x3HI, true, RVVM2HI, 3, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x3HI, true, RVVM1HI, 3, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x3HI, true, RVVMF2HI, 3, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x3HI, TARGET_MIN_VLEN > 32, RVVMF4HI, 3, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x3HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 3, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x3HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 3, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM4x2HI, true, RVVM4HI, 2, LMUL_4, 4)
 TUPLE_ENTRY (RVVM2x2HI, true, RVVM2HI, 2, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x2HI, true, RVVM1HI, 2, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x2HI, true, RVVMF2HI, 2, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x2HI, TARGET_MIN_VLEN > 32, RVVMF4HI, 2, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x2HI, !TARGET_XTHEADVECTOR, RVVMF2HI, 2, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x2HI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HI, 2, LMUL_F4, 64)

 TUPLE_ENTRY (RVVM1x8HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 8, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x8HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 8, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x8HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, RVVMF4HF, 8, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x8HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 8, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x8HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 8, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM1x7HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 7, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x7HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 7, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x7HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, RVVMF4HF, 7, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x7HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 7, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x7HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 7, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM1x6HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 6, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x6HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 6, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x6HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, RVVMF4HF, 6, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x6HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 6, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x6HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 6, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM1x5HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 5, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x5HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 5, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x5HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, RVVMF4HF, 5, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x5HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 5, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x5HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 5, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM2x4HF, TARGET_VECTOR_ELEN_FP_16, RVVM2HF, 4, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x4HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 4, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x4HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 4, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, RVVMF4HF, 4, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x4HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 4, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x4HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 4, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM2x3HF, TARGET_VECTOR_ELEN_FP_16, RVVM2HF, 3, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x3HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 3, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x3HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 3, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x3HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, RVVMF4HF, 3, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x3HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 3, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x3HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 3, LMUL_F4, 64)
 TUPLE_ENTRY (RVVM4x2HF, TARGET_VECTOR_ELEN_FP_16, RVVM4HF, 2, LMUL_4, 4)
 TUPLE_ENTRY (RVVM2x2HF, TARGET_VECTOR_ELEN_FP_16, RVVM2HF, 2, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x2HF, TARGET_VECTOR_ELEN_FP_16, RVVM1HF, 2, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x2HF, TARGET_VECTOR_ELEN_FP_16, RVVMF2HF, 2, LMUL_F2, 32)
-TUPLE_ENTRY (RVVMF4x2HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32, RVVMF4HF, 2, LMUL_F4, 64)
+TUPLE_ENTRY (RVVMF2x2HF, TARGET_VECTOR_ELEN_FP_16 && !TARGET_XTHEADVECTOR, RVVMF2HF, 2, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF4x2HF, TARGET_VECTOR_ELEN_FP_16 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF4HF, 2, LMUL_F4, 64)

 TUPLE_ENTRY (RVVM1x8SI, true, RVVM1SI, 8, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x8SI, TARGET_MIN_VLEN > 32, RVVMF2SI, 8, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x8SI, (TARGET_MIN_VLEN > 32) && !TARGET_XTHEADVECTOR, RVVMF2SI, 8, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM1x7SI, true, RVVM1SI, 7, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x7SI, TARGET_MIN_VLEN > 32, RVVMF2SI, 7, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x7SI, (TARGET_MIN_VLEN > 32) && !TARGET_XTHEADVECTOR, RVVMF2SI, 7, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM1x6SI, true, RVVM1SI, 6, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x6SI, TARGET_MIN_VLEN > 32, RVVMF2SI, 6, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x6SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 6, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM1x5SI, true, RVVM1SI, 5, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x5SI, TARGET_MIN_VLEN > 32, RVVMF2SI, 5, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x5SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 5, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM2x4SI, true, RVVM2SI, 4, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x4SI, true, RVVM1SI, 4, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x4SI, TARGET_MIN_VLEN > 32, RVVMF2SI, 4, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x4SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 4, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM2x3SI, true, RVVM2SI, 3, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x3SI, true, RVVM1SI, 3, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x3SI, TARGET_MIN_VLEN > 32, RVVMF2SI, 3, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x3SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 3, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM4x2SI, true, RVVM4SI, 2, LMUL_4, 4)
 TUPLE_ENTRY (RVVM2x2SI, true, RVVM2SI, 2, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x2SI, true, RVVM1SI, 2, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x2SI, TARGET_MIN_VLEN > 32, RVVMF2SI, 2, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x2SI, TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SI, 2, LMUL_F2, 32)

 TUPLE_ENTRY (RVVM1x8SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 8, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x8SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, RVVMF2SF, 8, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x8SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 8, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM1x7SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 7, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x7SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, RVVMF2SF, 7, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x7SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 7, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM1x6SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 6, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x6SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, RVVMF2SF, 6, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x6SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 6, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM1x5SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 5, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x5SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, RVVMF2SF, 5, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x5SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 5, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM2x4SF, TARGET_VECTOR_ELEN_FP_32, RVVM2SF, 4, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x4SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 4, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x4SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, RVVMF2SF, 4, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x4SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 4, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM2x3SF, TARGET_VECTOR_ELEN_FP_32, RVVM2SF, 3, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x3SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 3, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x3SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, RVVMF2SF, 3, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x3SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 3, LMUL_F2, 32)
 TUPLE_ENTRY (RVVM4x2SF, TARGET_VECTOR_ELEN_FP_32, RVVM4SF, 2, LMUL_4, 4)
 TUPLE_ENTRY (RVVM2x2SF, TARGET_VECTOR_ELEN_FP_32, RVVM2SF, 2, LMUL_2, 8)
 TUPLE_ENTRY (RVVM1x2SF, TARGET_VECTOR_ELEN_FP_32, RVVM1SF, 2, LMUL_1, 16)
-TUPLE_ENTRY (RVVMF2x2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32, RVVMF2SF, 2, LMUL_F2, 32)
+TUPLE_ENTRY (RVVMF2x2SF, TARGET_VECTOR_ELEN_FP_32 && TARGET_MIN_VLEN > 32 && !TARGET_XTHEADVECTOR, RVVMF2SF, 2, LMUL_F2, 32)

 TUPLE_ENTRY (RVVM1x8DI, TARGET_VECTOR_ELEN_64, RVVM1DI, 8, LMUL_1, 16)
 TUPLE_ENTRY (RVVM1x7DI, TARGET_VECTOR_ELEN_64, RVVM1DI, 7, LMUL_1, 16)
diff --git a/gcc/config/riscv/riscv.cc b/gcc/config/riscv/riscv.cc
index 8f74cd2e502..28cf1f414e9 100644
--- a/gcc/config/riscv/riscv.cc
+++ b/gcc/config/riscv/riscv.cc
@@ -1406,6 +1406,9 @@ riscv_v_adjust_bytesize (machine_mode mode, int scale)
 {
   if (riscv_v_ext_vector_mode_p (mode))
     {
+      if (TARGET_XTHEADVECTOR)
+   return BYTES_PER_RISCV_VECTOR;
+
       poly_int64 nunits = GET_MODE_NUNITS (mode);
       poly_int64 mode_size = GET_MODE_SIZE (mode);

@@ -9995,7 +9998,7 @@ riscv_use_divmod_expander (void)
 static machine_mode
 riscv_preferred_simd_mode (scalar_mode mode)
 {
-  if (TARGET_VECTOR)
+  if (TARGET_VECTOR && !TARGET_XTHEADVECTOR)
     return riscv_vector::preferred_simd_mode (mode);

   return word_mode;
@@ -10346,7 +10349,7 @@ riscv_mode_priority (int, int n)
 unsigned int
 riscv_autovectorize_vector_modes (vector_modes *modes, bool all)
 {
-  if (TARGET_VECTOR)
+  if (TARGET_VECTOR && !TARGET_XTHEADVECTOR)
     return riscv_vector::autovectorize_vector_modes (modes, all);

   return default_autovectorize_vector_modes (modes, all);
@@ -10529,6 +10532,16 @@ extract_base_offset_in_addr (rtx mem, rtx *base, rtx *offset)
   return false;
 }

+/* Implements target hook vector_mode_supported_any_target_p.  */
+
+static bool
+riscv_vector_mode_supported_any_target_p (machine_mode mode)
+{
+  if (TARGET_XTHEADVECTOR)
+    return false;
+  return true;
+}
+
 /* Initialize the GCC target structure.  */
 #undef TARGET_ASM_ALIGNED_HI_OP
 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
@@ -10872,6 +10885,9 @@ extract_base_offset_in_addr (rtx mem, rtx *base, rtx *offset)
 #undef TARGET_PREFERRED_ELSE_VALUE
 #define TARGET_PREFERRED_ELSE_VALUE riscv_preferred_else_value

+#undef TARGET_VECTOR_MODE_SUPPORTED_ANY_TARGET_P
+#define TARGET_VECTOR_MODE_SUPPORTED_ANY_TARGET_P riscv_vector_mode_supported_any_target_p
+
 struct gcc_target targetm = TARGET_INITIALIZER;

 #include "gt-riscv.h"
diff --git a/gcc/config/riscv/riscv_th_vector.h b/gcc/config/riscv/riscv_th_vector.h
new file mode 100644
index 00000000000..b6b6738bdda
--- /dev/null
+++ b/gcc/config/riscv/riscv_th_vector.h
@@ -0,0 +1,49 @@
+/* RISC-V 'XTheadVector' Extension intrinsics include file.
+   Copyright (C) 2024 Free Software Foundation, Inc.
+
+   This file is part of GCC.
+
+   GCC is free software; you can redistribute it and/or modify it
+   under the terms of the GNU General Public License as published
+   by the Free Software Foundation; either version 3, or (at your
+   option) any later version.
+
+   GCC is distributed in the hope that it will be useful, but WITHOUT
+   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
+   License for more details.
+
+   Under Section 7 of GPL version 3, you are granted additional
+   permissions described in the GCC Runtime Library Exception, version
+   3.1, as published by the Free Software Foundation.
+
+   You should have received a copy of the GNU General Public License and
+   a copy of the GCC Runtime Library Exception along with this program;
+   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+   <" target="_blank">" target="_blank">" target="_blank">http://www.gnu.org/licenses/>.  */
+
+#ifndef __RISCV_TH_VECTOR_H
+#define __RISCV_TH_VECTOR_H
+
+#include <stdint.h>
+#include <stddef.h>
+
+#ifndef __riscv_xtheadvector
+#error "XTheadVector intrinsics require the xtheadvector extension."
+#else
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* NOTE: This implementation of riscv_th_vector.h is intentionally short.  It does
+   not define the RVV types and intrinsic functions directly in C and C++
+   code, but instead uses the following pragma to tell GCC to insert the
+   necessary type and function definitions itself.  The net effect is the
+   same, and the file is a complete implementation of riscv_th_vector.h.  */
+#pragma riscv intrinsic "xtheadvector"
+
+#ifdef __cplusplus
+}
+#endif // __cplusplus
+#endif // __riscv_xtheadvector
+#endif // __RISCV_TH_VECTOR_H
diff --git a/gcc/config/riscv/thead-vector.md b/gcc/config/riscv/thead-vector.md
new file mode 100644
index 00000000000..696b815252d
--- /dev/null
+++ b/gcc/config/riscv/thead-vector.md
@@ -0,0 +1,102 @@
+(define_c_enum "unspec" [
+  UNSPEC_TH_VWLDST
+])
+
+(define_mode_iterator V_VLS_VT [V VLS VT])
+(define_mode_iterator V_VB_VLS_VT [V VB VLS VT])
+
+(define_split
+  [(set (match_operand:V_VB_VLS_VT 0 "reg_or_mem_operand")
+   (match_operand:V_VB_VLS_VT 1 "reg_or_mem_operand"))]
+  "TARGET_XTHEADVECTOR"
+  [(const_int 0)]
+  {
+    emit_insn (gen_pred_th_whole_mov (<MODE>mode, operands[0], operands[1],
+                     RVV_VLMAX, GEN_INT(riscv_vector::VLMAX)));
+    DONE;
+  })
+
+(define_insn_and_split "@pred_th_whole_mov<mode>"
+  [(set (match_operand:V_VLS_VT 0 "reg_or_mem_operand"  "=vr,vr, m")
+   (unspec:V_VLS_VT
+     [(match_operand:V_VLS_VT 1 "reg_or_mem_operand" " vr, m,vr")
+      (match_operand 2 "vector_length_operand"   " rK, rK, rK")
+      (match_operand 3 "const_1_operand"         "  i, i, i")
+      (reg:SI VL_REGNUM)
+      (reg:SI VTYPE_REGNUM)]
+   UNSPEC_TH_VWLDST))]
+  "TARGET_XTHEADVECTOR"
+  "@
+   vmv.v.v\t%0,%1
+   vle.v\t%0,%1
+   vse.v\t%1,%0"
+  "&& REG_P (operands[0]) && REG_P (operands[1])
+   && REGNO (operands[0]) == REGNO (operands[1])"
+  [(const_int 0)]
+  ""
+  [(set_attr "type" "vimov,vlds,vlds")
+   (set_attr "mode" "<MODE>")
+   (set (attr "ta") (symbol_ref "riscv_vector::TAIL_UNDISTURBED"))
+   (set (attr "ma") (symbol_ref "riscv_vector::MASK_UNDISTURBED"))
+   (set (attr "avl_type_idx") (const_int 3))
+   (set_attr "vl_op_idx" "2")])
+
+(define_insn_and_split "@pred_th_whole_mov<mode>"
+  [(set (match_operand:VB 0 "reg_or_mem_operand"  "=vr,vr, m")
+   (unspec:VB
+     [(match_operand:VB 1 "reg_or_mem_operand" " vr, m,vr")
+      (match_operand 2 "vector_length_operand"   " rK, rK, rK")
+      (match_operand 3 "const_1_operand"         "  i, i, i")
+      (reg:SI VL_REGNUM)
+      (reg:SI VTYPE_REGNUM)]
+   UNSPEC_TH_VWLDST))]
+  "TARGET_XTHEADVECTOR"
+  "@
+   vmv.v.v\t%0,%1
+   vle.v\t%0,%1
+   vse.v\t%1,%0"
+  "&& REG_P (operands[0]) && REG_P (operands[1])
+   && REGNO (operands[0]) == REGNO (operands[1])"
+  [(const_int 0)]
+  ""
+  [(set_attr "type" "vimov,vlds,vlds")
+   (set_attr "mode" "<MODE>")
+   (set (attr "ta") (symbol_ref "riscv_vector::TAIL_UNDISTURBED"))
+   (set (attr "ma") (symbol_ref "riscv_vector::MASK_UNDISTURBED"))
+   (set (attr "avl_type_idx") (const_int 3))
+   (set_attr "vl_op_idx" "2")
+   (set (attr "sew") (const_int 8))
+   (set (attr "vlmul") (symbol_ref "riscv_vector::LMUL_1"))])
+
+(define_insn_and_split "*pred_th_mov<mode>"
+  [(set (match_operand:VB_VLS 0 "nonimmediate_operand"               "=vr,   m,  vr,  vr,  vr")
+   (if_then_else:VB_VLS
+     (unspec:VB_VLS
+       [(match_operand:VB_VLS 1 "vector_all_trues_mask_operand" "Wc1, Wc1, Wc1, Wc1, Wc1")
+        (match_operand 4 "vector_length_operand"            " rK,  rK,  rK,  rK,  rK")
+        (match_operand 5 "const_int_operand"                "  i,   i,   i,   i,   i")
+        (reg:SI VL_REGNUM)
+        (reg:SI VTYPE_REGNUM)] UNSPEC_VPREDICATE)
+     (match_operand:VB_VLS 3 "vector_move_operand"              "  m,  vr,  vr, Wc0, Wc1")
+     (match_operand:VB_VLS 2 "vector_undef_operand"             " vu,  vu,  vu,  vu,  vu")))]
+  "TARGET_XTHEADVECTOR"
+  "@
+   #
+   #
+   vmcpy.m\t%0,%3
+   vmclr.m\t%0
+   vmset.m\t%0"
+  "&& !reload_completed"
+  [(const_int 0)]
+  {
+    if ((MEM_P (operands[0]) || MEM_P (operands[3]))
+        || (REG_P (operands[0]) && REG_P (operands[3])
+       && INTVAL (operands[5]) == riscv_vector::VLMAX))
+      {
+   emit_move_insn (operands[0], operands[3]);
+   DONE;
+      }
+    FAIL;
+  }
+  [(set_attr "type" "vldm,vstm,vmalu,vmalu,vmalu")
+   (set_attr "mode" "<MODE>")])
diff --git a/gcc/config/riscv/thead.cc b/gcc/config/riscv/thead.cc
index 00b2e14c6c0..cc04101b256 100644
--- a/gcc/config/riscv/thead.cc
+++ b/gcc/config/riscv/thead.cc
@@ -27,6 +27,7 @@
 #include "backend.h"
 #include "tree.h"
 #include "rtl.h"
+#include "insn-attr.h"
 #include "explow.h"
 #include "memmodel.h"
 #include "emit-rtl.h"
@@ -890,8 +891,26 @@ th_asm_output_opcode (FILE *asm_out_file, const char *p)
 {
   /* We need to add th. prefix to all the xtheadvector
      instructions here.*/
-  if (current_output_insn != NULL && p[0] == 'v')
-    fputs ("th.", asm_out_file);
+  if (current_output_insn != NULL)
+    {
+      if (get_attr_type (current_output_insn) == TYPE_VSETVL)
+   {
+     if (strstr (p, "zero"))
+       {
+         if (strstr (p, "zero,zero"))
+       return "th.vsetvli\tzero,zero,e%0,%m1";
+         else
+       return "th.vsetvli\tzero,%0,e%1,%m2";
+       }
+     else
+       {
+         return "th.vsetvli\t%0,%1,e%2,%m3";
+       }
+   }
+
+      if (p[0] == 'v')
+   fputs ("th.", asm_out_file);
+    }

   return p;
 }
diff --git a/gcc/config/riscv/vector.md b/gcc/config/riscv/vector.md
index 24b7b4394be..3eb6daafbc2 100644
--- a/gcc/config/riscv/vector.md
+++ b/gcc/config/riscv/vector.md
@@ -87,7 +87,7 @@
 ;; check. However, we need default value of SEW for vsetvl instruction since there
 ;; is no field for ratio in the vsetvl instruction encoding.
 (define_attr "sew" ""
-  (cond [(eq_attr "mode" "RVVMF64BI,RVVMF32BI,RVVMF16BI,RVVMF8BI,RVVMF4BI,RVVMF2BI,RVVM1BI,\
+  (cond [(eq_attr "mode" "RVVMF8BI,RVVMF4BI,RVVMF2BI,RVVM1BI,\
              RVVM8QI,RVVM4QI,RVVM2QI,RVVM1QI,RVVMF2QI,RVVMF4QI,RVVMF8QI,\
              RVVM1x8QI,RVVMF2x8QI,RVVMF4x8QI,RVVMF8x8QI,\
              RVVM1x7QI,RVVMF2x7QI,RVVMF4x7QI,RVVMF8x7QI,\
@@ -99,6 +99,18 @@
              V1QI,V2QI,V4QI,V8QI,V16QI,V32QI,V64QI,V128QI,V256QI,V512QI,V1024QI,V2048QI,V4096QI,\
              V1BI,V2BI,V4BI,V8BI,V16BI,V32BI,V64BI,V128BI,V256BI,V512BI,V1024BI,V2048BI,V4096BI")
     (const_int 8)
+    (eq_attr "mode" "RVVMF16BI")
+      (if_then_else (match_test "TARGET_XTHEADVECTOR")
+        (const_int 16)
+        (const_int 8))
+    (eq_attr "mode" "RVVMF32BI")
+      (if_then_else (match_test "TARGET_XTHEADVECTOR")
+        (const_int 32)
+        (const_int 8))
+    (eq_attr "mode" "RVVMF64BI")
+      (if_then_else (match_test "TARGET_XTHEADVECTOR")
+        (const_int 64)
+        (const_int 8))
     (eq_attr "mode" "RVVM8HI,RVVM4HI,RVVM2HI,RVVM1HI,RVVMF2HI,RVVMF4HI,\
              RVVM1x8HI,RVVMF2x8HI,RVVMF4x8HI,\
              RVVM1x7HI,RVVMF2x7HI,RVVMF4x7HI,\
@@ -159,9 +171,9 @@
     (eq_attr "mode" "RVVM4QI,RVVMF2BI") (symbol_ref "riscv_vector::LMUL_4")
     (eq_attr "mode" "RVVM2QI,RVVMF4BI") (symbol_ref "riscv_vector::LMUL_2")
     (eq_attr "mode" "RVVM1QI,RVVMF8BI") (symbol_ref "riscv_vector::LMUL_1")
-    (eq_attr "mode" "RVVMF2QI,RVVMF16BI") (symbol_ref "riscv_vector::LMUL_F2")
-    (eq_attr "mode" "RVVMF4QI,RVVMF32BI") (symbol_ref "riscv_vector::LMUL_F4")
-    (eq_attr "mode" "RVVMF8QI,RVVMF64BI") (symbol_ref "riscv_vector::LMUL_F8")
+    (eq_attr "mode" "RVVMF2QI,RVVMF16BI") (symbol_ref "TARGET_XTHEADVECTOR ? riscv_vector::LMUL_1 : riscv_vector::LMUL_F2")
+    (eq_attr "mode" "RVVMF4QI,RVVMF32BI") (symbol_ref "TARGET_XTHEADVECTOR ? riscv_vector::LMUL_1 : riscv_vector::LMUL_F4")
+    (eq_attr "mode" "RVVMF8QI,RVVMF64BI") (symbol_ref "TARGET_XTHEADVECTOR ? riscv_vector::LMUL_1 : riscv_vector::LMUL_F8")
     (eq_attr "mode" "RVVM8HI") (symbol_ref "riscv_vector::LMUL_8")
     (eq_attr "mode" "RVVM4HI") (symbol_ref "riscv_vector::LMUL_4")
     (eq_attr "mode" "RVVM2HI") (symbol_ref "riscv_vector::LMUL_2")
@@ -436,6 +448,10 @@
              vaeskf1,vaeskf2,vaesz,vsha2ms,vsha2ch,vsha2cl,vsm4k,vsm4r,\
              vsm3me,vsm3c")
       (const_int INVALID_ATTRIBUTE)
+   (and (eq_attr "type" "vlde,vste,vlsegde,vssegte,vlsegds,vssegts,\
+                  vlsegdff,vssegtux,vlsegdox,vlsegdux")
+         (match_test "TARGET_XTHEADVECTOR"))
+      (const_int INVALID_ATTRIBUTE)
     (eq_attr "mode" "RVVM8QI,RVVM1BI") (const_int 1)
     (eq_attr "mode" "RVVM4QI,RVVMF2BI") (const_int 2)
     (eq_attr "mode" "RVVM2QI,RVVMF4BI") (const_int 4)
@@ -905,6 +921,8 @@
     (symbol_ref "riscv_vector::FRM_DYN")]
    (symbol_ref "riscv_vector::FRM_NONE")))

+(include "thead-vector.md")
+
 ;; -----------------------------------------------------------------
 ;; ---- Miscellaneous Operations
 ;; -----------------------------------------------------------------
@@ -1078,6 +1096,12 @@
        - We can not leave it to TARGET_SECONDARY_RELOAD since it happens
     before spilling. The clobber scratch is used by spilling fractional
     registers in IRA/LRA so it's too early.  */
+  if (TARGET_XTHEADVECTOR)
+    {
+      emit_insn (gen_pred_th_whole_mov (<MODE>mode, operands[0], operands[1],
+                   RVV_VLMAX, GEN_INT(riscv_vector::VLMAX)));
+      DONE;
+    }

   if (riscv_vector::legitimize_move (operands[0], &operands[1]))
     DONE;
@@ -1114,7 +1138,7 @@
 (define_insn "*mov<mode>_whole"
   [(set (match_operand:V_WHOLE 0 "reg_or_mem_operand" "=vr, m,vr")
    (match_operand:V_WHOLE 1 "reg_or_mem_operand" "  m,vr,vr"))]
-  "TARGET_VECTOR"
+  "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
   "@
    vl%m1re<sew>.v\t%0,%1
    vs%m1r.v\t%1,%0
@@ -1135,6 +1159,13 @@
    (match_operand:VB 1 "general_operand"))]
   "TARGET_VECTOR"
 {
+  if (TARGET_XTHEADVECTOR)
+    {
+      emit_insn (gen_pred_th_whole_mov (<MODE>mode, operands[0], operands[1],
+                   RVV_VLMAX, GEN_INT(riscv_vector::VLMAX)));
+      DONE;
+    }
+
   if (riscv_vector::legitimize_move (operands[0], &operands[1]))
     DONE;
 })
@@ -1142,7 +1173,7 @@
 (define_insn "*mov<mode>"
   [(set (match_operand:VB 0 "register_operand" "=vr")
    (match_operand:VB 1 "register_operand" " vr"))]
-  "TARGET_VECTOR"
+  "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
   "vmv1r.v\t%0,%1"
   [(set_attr "type" "vmov")
    (set_attr "mode" "<MODE>")])
@@ -3692,7 +3723,7 @@
      (any_extend:VWEXTI
        (match_operand:<V_DOUBLE_TRUNC> 3 "register_operand"   "W21,W21,W21,W21,W42,W42,W42,W42,W84,W84,W84,W84,   vr,   vr"))
      (match_operand:VWEXTI 2 "vector_merge_operand"           " vu, vu,  0,  0, vu, vu,  0,  0, vu, vu,  0,  0,   vu,    0")))]
-  "TARGET_VECTOR"
+  "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
   "v<sz>ext.vf2\t%0,%3%p1"
   [(set_attr "type" "vext")
    (set_attr "mode" "<MODE>")
@@ -3713,7 +3744,7 @@
      (any_extend:VQEXTI
        (match_operand:<V_QUAD_TRUNC> 3 "register_operand"   "W43,W43,W43,W43,W86,W86,W86,W86,   vr,   vr"))
      (match_operand:VQEXTI 2 "vector_merge_operand"         " vu, vu,  0,  0, vu, vu,  0,  0,   vu,    0")))]
-  "TARGET_VECTOR"
+  "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
   "v<sz>ext.vf4\t%0,%3%p1"
   [(set_attr "type" "vext")
    (set_attr "mode" "<MODE>")
@@ -3734,7 +3765,7 @@
      (any_extend:VOEXTI
        (match_operand:<V_OCT_TRUNC> 3 "register_operand"   "W87,W87,W87,W87,   vr,   vr"))
      (match_operand:VOEXTI 2 "vector_merge_operand"        " vu, vu,  0,  0,   vu,    0")))]
-  "TARGET_VECTOR"
+  "TARGET_VECTOR && !TARGET_XTHEADVECTOR"
   "v<sz>ext.vf8\t%0,%3%p1"
   [(set_attr "type" "vext")
    (set_attr "mode" "<MODE>")
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/abi-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/abi-1.c
index 2e0e12aa045..2eef9e1e1a8 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/abi-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/abi-1.c
@@ -1,4 +1,4 @@
-/* { dg-do compile } */
+/* { dg-do compile { target { ! riscv_xtheadvector } } } */
 /* { dg-skip-if "test rvv intrinsic" { *-*-* } { "*" } { "-march=rv*v*" } } */

 void foo0 () {__rvv_bool64_t t;}
diff --git a/gcc/testsuite/gcc.target/riscv/rvv/base/pragma-1.c b/gcc/testsuite/gcc.target/riscv/rvv/base/pragma-1.c
index 3d81b179235..ef329e30785 100644
--- a/gcc/testsuite/gcc.target/riscv/rvv/base/pragma-1.c
+++ b/gcc/testsuite/gcc.target/riscv/rvv/base/pragma-1.c
@@ -1,4 +1,4 @@
 /* { dg-do compile } */
 /* { dg-options "-O3 -march=rv32gc -mabi=ilp32d" } */

-#pragma riscv intrinsic "vector" /* { dg-error {#pragma riscv intrinsic' option 'vector' needs 'V' extension enabled} } */
+#pragma riscv intrinsic "vector" /* { dg-error {#pragma riscv intrinsic' option 'vector' needs 'V' or 'XTHEADVECTOR' extension enabled} } */
diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
index b27c30b8c51..bf0ad5342d6 100644
--- a/gcc/testsuite/lib/target-supports.exp
+++ b/gcc/testsuite/lib/target-supports.exp
@@ -1956,6 +1956,18 @@ proc check_effective_target_riscv_zbb { } {
     }]
 }

+# Return 1 if the target arch supports the XTheadVector extension, 0 otherwise.
+# Cache the result.
+
+proc check_effective_target_riscv_xtheadvector { } {
+    return [check_no_compiler_messages riscv_ext_xtheadvector assembly {
+       #ifndef __riscv_xtheadvector
+       #error "Not __riscv_xtheadvector"
+       #endif
+    }]
+}
+
+
 # Return 1 if we can execute code when using dg-add-options riscv_v

 proc check_effective_target_riscv_v_ok { } {
