@@ -69,12 +69,12 @@ struct vmw_user_fence {
  * be assigned the current time tv_usec val when the fence signals.
  */
 struct vmw_event_fence_action {
-	struct drm_pending_event e;
 	struct vmw_fence_action action;
+
+	struct drm_pending_event *event;
 	struct vmw_fence_obj *fence;
 	struct drm_device *dev;
-	struct kref kref;
-	uint32_t size;
+
 	uint32_t *tv_sec;
 	uint32_t *tv_usec;
 };
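The struct change above is the core of the rework: instead of embedding the drm_pending_event (plus a kref and a size for shared-lifetime bookkeeping), the action now just points at an event owned by the DRM core, so every object has exactly one owner. A minimal sketch of what that buys, using only names from this diff:

	/* Before: delivery callbacks had to recover the wrapper from the
	 * embedded member and juggle a shared refcount. */
	struct vmw_event_fence_action *eaction =
		container_of(e, struct vmw_event_fence_action, e);
	kref_put(&eaction->kref, vmw_event_fence_action_destroy);

	/* After: the action simply dereferences its pointer; freeing the
	 * event is the DRM core's job. */
	struct drm_file *file_priv = eaction->event->file_priv;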
@@ -783,49 +783,6 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
 					 TTM_REF_USAGE);
 }
 
-/**
- * vmw_event_fence_action_destroy
- *
- * @kref: The struct kref embedded in a struct vmw_event_fence_action.
- *
- * The vmw_event_fence_action destructor that may be called either after
- * the fence action cleanup, or when the event is delivered.
- * It frees both the vmw_event_fence_action struct and the actual
- * event structure copied to user-space.
- */
-static void vmw_event_fence_action_destroy(struct kref *kref)
-{
-	struct vmw_event_fence_action *eaction =
-		container_of(kref, struct vmw_event_fence_action, kref);
-	struct ttm_mem_global *mem_glob =
-		vmw_mem_glob(vmw_priv(eaction->dev));
-	uint32_t size = eaction->size;
-
-	kfree(eaction->e.event);
-	kfree(eaction);
-	ttm_mem_global_free(mem_glob, size);
-}
-
-
-/**
- * vmw_event_fence_action_delivered
- *
- * @e: The struct drm_pending_event embedded in a struct
- * vmw_event_fence_action.
- *
- * The struct drm_pending_event destructor that is called by drm
- * once the event is delivered. Since we don't know whether this function
- * will be called before or after the fence action destructor, we
- * free a refcount and destroy if it becomes zero.
- */
-static void vmw_event_fence_action_delivered(struct drm_pending_event *e)
-{
-	struct vmw_event_fence_action *eaction =
-		container_of(e, struct vmw_event_fence_action, e);
-
-	kref_put(&eaction->kref, vmw_event_fence_action_destroy);
-}
-
 
 /**
  * vmw_event_fence_action_seq_passed
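For reference, the lifetime protocol that the two deleted functions implemented, reconstructed from the removed code: the kref was born at one, bumped to two when the event was queued, and each of the two destructors dropped one reference, so whichever ran last did the freeing:

	kref_init(&eaction->kref);	/* at creation */
	kref_get(&eaction->kref);	/* in vmw_event_fence_action_seq_passed() */

	kref_put(&eaction->kref, vmw_event_fence_action_destroy);	/* on delivery */
	kref_put(&eaction->kref, vmw_event_fence_action_destroy);	/* on cleanup  */

With the event owned by the DRM core instead, neither side needs to know whether the other has already run, and the whole dance disappears.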
@@ -836,18 +793,16 @@ static void vmw_event_fence_action_delivered(struct drm_pending_event *e)
  * This function is called when the seqno of the fence where @action is
  * attached has passed. It queues the event on the submitter's event list.
  * This function is always called from atomic context, and may be called
- * from irq context. It ups a refcount reflecting that we now have two
- * destructors.
+ * from irq context.
  */
 static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
 {
 	struct vmw_event_fence_action *eaction =
 		container_of(action, struct vmw_event_fence_action, action);
 	struct drm_device *dev = eaction->dev;
-	struct drm_file *file_priv = eaction->e.file_priv;
+	struct drm_file *file_priv = eaction->event->file_priv;
 	unsigned long irq_flags;
 
-	kref_get(&eaction->kref);
 	spin_lock_irqsave(&dev->event_lock, irq_flags);
 
 	if (likely(eaction->tv_sec != NULL)) {
@@ -858,7 +813,7 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
 		*eaction->tv_usec = tv.tv_usec;
 	}
 
-	list_add_tail(&eaction->e.link, &file_priv->event_list);
+	list_add_tail(&eaction->event->link, &file_priv->event_list);
 	wake_up_all(&file_priv->event_wait);
 	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
 }
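Delivery itself is unchanged in principle: the pending event goes onto the submitter's event list under dev->event_lock and readers are woken. A hypothetical userspace consumer (a sketch, not part of this commit; drm_fd and the buffer size are assumptions) picks it up like any other DRM event:

	char buf[1024];
	/* Blocks until an event arrives, assuming the fd is not O_NONBLOCK. */
	ssize_t len = read(drm_fd, buf, sizeof(buf));
	char *p = buf;

	while (len > 0 && p < buf + len) {
		struct drm_event *e = (struct drm_event *)p;

		if (e->type == DRM_VMW_EVENT_FENCE_SIGNALED) {
			struct drm_vmw_event_fence *fe =
				(struct drm_vmw_event_fence *)e;
			/* fe->user_data identifies the submission */
		}
		p += e->length;	/* events are variable-length */
	}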
@@ -878,7 +833,7 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
 		container_of(action, struct vmw_event_fence_action, action);
 
 	vmw_fence_obj_unreference(&eaction->fence);
-	kref_put(&eaction->kref, vmw_event_fence_action_destroy);
+	kfree(eaction);
 }
 
 
@@ -946,56 +901,108 @@ void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
  * an error code, the caller needs to free that object.
  */
 
-int vmw_event_fence_action_create(struct drm_file *file_priv,
-				  struct vmw_fence_obj *fence,
-				  struct drm_event *event,
-				  uint32_t *tv_sec,
-				  uint32_t *tv_usec,
-				  bool interruptible)
+int vmw_event_fence_action_queue(struct drm_file *file_priv,
+				 struct vmw_fence_obj *fence,
+				 struct drm_pending_event *event,
+				 uint32_t *tv_sec,
+				 uint32_t *tv_usec,
+				 bool interruptible)
 {
 	struct vmw_event_fence_action *eaction;
-	struct ttm_mem_global *mem_glob =
-		vmw_mem_glob(fence->fman->dev_priv);
 	struct vmw_fence_manager *fman = fence->fman;
-	uint32_t size = fman->event_fence_action_size +
-		ttm_round_pot(event->length);
-	int ret;
-
-	/*
-	 * Account for internal structure size as well as the
-	 * event size itself.
-	 */
 
-	ret = ttm_mem_global_alloc(mem_glob, size, false, interruptible);
-	if (unlikely(ret != 0))
-		return ret;
 
 	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
-	if (unlikely(eaction == NULL)) {
-		ttm_mem_global_free(mem_glob, size);
+	if (unlikely(eaction == NULL))
 		return -ENOMEM;
-	}
 
-	eaction->e.event = event;
-	eaction->e.file_priv = file_priv;
-	eaction->e.destroy = vmw_event_fence_action_delivered;
+	eaction->event = event;
 
 	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
 	eaction->action.cleanup = vmw_event_fence_action_cleanup;
 	eaction->action.type = VMW_ACTION_EVENT;
 
 	eaction->fence = vmw_fence_obj_reference(fence);
 	eaction->dev = fman->dev_priv->dev;
-	eaction->size = size;
 	eaction->tv_sec = tv_sec;
 	eaction->tv_usec = tv_usec;
 
-	kref_init(&eaction->kref);
 	vmw_fence_obj_add_action(fence, &eaction->action);
 
 	return 0;
 }
 
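Note what vmw_event_fence_action_queue() above no longer does: no TTM memory-global accounting and no refcounting; it only allocates the action wrapper and attaches it to the fence. The resulting ownership rules, summarized as comments:

	/*
	 * Ownership after this patch:
	 *   eaction - freed exactly once, in vmw_event_fence_action_cleanup().
	 *   event   - owned by the DRM core; its destroy() callback (plain
	 *             kfree below) runs on delivery or at file close.
	 *   fence   - held while the action is pending, dropped in cleanup
	 *             via vmw_fence_obj_unreference().
	 */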
+struct vmw_event_fence_pending {
+	struct drm_pending_event base;
+	struct drm_vmw_event_fence event;
+};
+
+int vmw_event_fence_action_create(struct drm_file *file_priv,
+				  struct vmw_fence_obj *fence,
+				  uint32_t flags,
+				  uint64_t user_data,
+				  bool interruptible)
+{
+	struct vmw_event_fence_pending *event;
+	struct drm_device *dev = fence->fman->dev_priv->dev;
+	unsigned long irq_flags;
+	int ret;
+
+	spin_lock_irqsave(&dev->event_lock, irq_flags);
+
+	ret = (file_priv->event_space < sizeof(event->event)) ? -EBUSY : 0;
+	if (likely(ret == 0))
+		file_priv->event_space -= sizeof(event->event);
+
+	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to allocate event space for this file.\n");
+		goto out_no_space;
+	}
+
+
+	event = kzalloc(sizeof(event->event), GFP_KERNEL);
+	if (unlikely(event == NULL)) {
+		DRM_ERROR("Failed to allocate an event.\n");
+		ret = -ENOMEM;
+		goto out_no_event;
+	}
+
+	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
+	event->event.base.length = sizeof(*event);
+	event->event.user_data = user_data;
+
+	event->base.event = &event->event.base;
+	event->base.file_priv = file_priv;
+	event->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+
+	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
+		ret = vmw_event_fence_action_queue(file_priv, fence,
+						   &event->base,
+						   &event->event.tv_sec,
+						   &event->event.tv_usec,
+						   interruptible);
+	else
+		ret = vmw_event_fence_action_queue(file_priv, fence,
+						   &event->base,
+						   NULL,
+						   NULL,
+						   interruptible);
+	if (ret != 0)
+		goto out_no_queue;
+
+out_no_queue:
+	event->base.destroy(&event->base);
+out_no_event:
+	spin_lock_irqsave(&dev->event_lock, irq_flags);
+	file_priv->event_space += sizeof(*event);
+	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+out_no_space:
+	return ret;
+}
+
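A few details in vmw_event_fence_action_create() look worth flagging. On success, ret == 0 falls straight through into out_no_queue and destroys the event that was just queued; the kzalloc sizes only the inner drm_vmw_event_fence rather than the whole wrapper that is populated right below it; the advertised base.length includes the kernel-only drm_pending_event base; and the error path refunds sizeof(*event) although sizeof(event->event) was reserved. A corrected tail might read as follows (a sketch of the apparent intent, not part of this commit):

	event = kzalloc(sizeof(*event), GFP_KERNEL);	/* the whole wrapper */
	...
	event->event.base.length = sizeof(event->event);	/* what userspace reads */
	...
	if (unlikely(ret != 0))
		goto out_no_queue;

	return 0;	/* success: the queued action now owns the event */

out_no_queue:
	event->base.destroy(&event->base);
out_no_event:
	spin_lock_irqsave(&dev->event_lock, irq_flags);
	file_priv->event_space += sizeof(event->event);	/* match the reservation */
	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
out_no_space:
	return ret;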
 
 int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
@@ -1008,8 +1015,6 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 		(struct drm_vmw_fence_rep __user *)(unsigned long)
 		arg->fence_rep;
 	uint32_t handle;
-	unsigned long irq_flags;
-	struct drm_vmw_event_fence *event;
 	int ret;
 
 	/*
@@ -1062,59 +1067,28 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 
 	BUG_ON(fence == NULL);
 
-	spin_lock_irqsave(&dev->event_lock, irq_flags);
-
-	ret = (file_priv->event_space < sizeof(*event)) ? -EBUSY : 0;
-	if (likely(ret == 0))
-		file_priv->event_space -= sizeof(*event);
-
-	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
-
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed to allocate event space for this file.\n");
-		goto out_no_event_space;
-	}
-
-	event = kzalloc(sizeof(*event), GFP_KERNEL);
-	if (unlikely(event == NULL)) {
-		DRM_ERROR("Failed to allocate an event.\n");
-		goto out_no_event;
-	}
-
-	event->base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
-	event->base.length = sizeof(*event);
-	event->user_data = arg->user_data;
-
 	if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
 		ret = vmw_event_fence_action_create(file_priv, fence,
-						    &event->base,
-						    &event->tv_sec,
-						    &event->tv_usec,
+						    arg->flags,
+						    arg->user_data,
 						    true);
 	else
 		ret = vmw_event_fence_action_create(file_priv, fence,
-						    &event->base,
-						    NULL,
-						    NULL,
+						    arg->flags,
+						    arg->user_data,
 						    true);
 
 	if (unlikely(ret != 0)) {
 		if (ret != -ERESTARTSYS)
 			DRM_ERROR("Failed to attach event to fence.\n");
-		goto out_no_attach;
+		goto out_no_create;
 	}
 
 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
 				    handle);
 	vmw_fence_obj_unreference(&fence);
 	return 0;
-out_no_attach:
-	kfree(event);
-out_no_event:
-	spin_lock_irqsave(&dev->event_lock, irq_flags);
-	file_priv->event_space += sizeof(*event);
-	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
-out_no_event_space:
+out_no_create:
 	if (user_fence_rep != NULL)
 		ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
 					  handle, TTM_REF_USAGE);